In [2]:
%matplotlib inline

Calculations of distributions of proplyd distances and sizes and angles. These translate into distributions of $\beta$.

Data sources

We have the list of proplyds from Henney & Arthur (1998) that we used in Fig 12 of Tsamis et al (2013). This has distances and sizes.

We also have the Ricci et al (2008) catalog, which has distances but not sizes.

In [3]:
from astropy.table import Table
import numpy as np
import seaborn as sns
from scipy import stats

Now use seaborn to get nicer graphs.

In [4]:
sns.set_palette("deep", desat=0.6)
sns.set_context("talk", rc={"figure.figsize": (12, 8)})

Henney & Arthur (1998) sample

In [5]:
datafile = "Proplyd-Datasets/vicente-vs-HA98.dat"
data = Table.read(datafile, format='ascii')
In [6]:
data
Out[6]:
IDdr14VicR14
163-3232.142.219.4
161-3246.053.525.8
168-3286.642.821.0
163-3176.915.027.1
166-3167.012.514.8
161-3287.749.120.0
167-3177.837.935.8
158-3239.426.330.6
158-3269.611.331.6
161-31410.245.317.1
158-32710.616.635.5
............
153-32116.971.28.4
159-33817.25.013.5
171-34019.1123.323.2
152-31919.1618.214.8
155-33820.4817.029.7
173-34122.484.118.1
180-33125.1212.229.3
177-34125.8420.451.6
159-35028.3520.144.5
182-41356.736.948.7
244-440142.3104.0180.6
In [7]:
from matplotlib import pyplot as plt
In [8]:
_ = plt.hist(data["d"], bins=30, range=(0, 30), cumulative=True)
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of proplyds with projected distance < D\'')
_ = plt.title('Cumulative distribution of proplyds vs distance: HA98')
In [9]:
m1 = data["d"] <= 7.5
m2 = np.abs(data["d"] - 13.0) <= 5.5
m3 = np.abs(data["d"] - 25.0) <= 6.5
h1, edges = np.histogram(data["r14"][m1], bins=10, range=(0, 30), density=True)
h2, edges = np.histogram(data["r14"][m2], bins=10, range=(0, 30), density=True)
h3, edges = np.histogram(data["r14"][m3], bins=10, range=(0, 30), density=True)
plt.bar(edges[:-1], h1, 2.5, color='r', alpha=0.5, label='close: D\' < 7.5"')
plt.bar(edges[:-1], h2, 2.5, h1, color='g', alpha=0.5, label='medium: 7.5" < D\' < 18.5"')
plt.bar(edges[:-1], h3, 2.5, h1 + h2, color='b', alpha=0.5, label='far: D\' > 18.5"')
_ = plt.xlabel('Proplyd i-front radius, 1e14 cm')
_ = plt.ylabel('Fraction of proplyds in each size bin')
_ = plt.title('Histogram of proplyd sizes: HA98')
_ = plt.legend(loc='upper right')
In [10]:
_ = plt.plot(data["d"], data["r14"], 'o')
_ = plt.xlim(0.0, 30.0)
_ = plt.ylim(0.0, 30.0)
plt.xlabel("Distance, arcsec")
plt.ylabel("Radius, 1e14 cm")
Out[10]:
<matplotlib.text.Text at 0x10dd5c110>

We need to take account of binaries

Now we will try out some of the seaborn tools for visualizing data distributions. See this tutorial.

In the section on the Ricci data it occurred to me that we need to mirror the data about zero because the radius and separation are positive-definite.

In [11]:
def mirrored(dataset):
    """Expand a positive-definite dataset (1d sequence) to include its negative mirror set.

    Accepts any 1-d sequence (list, tuple, ndarray, table column); the
    original version required an ndarray because plain Python lists do
    not support unary negation.  Returns a 1-d ndarray of length
    ``2 * len(dataset)`` containing the data followed by its negation.
    """
    a = np.asarray(dataset)
    # Mirroring about zero only makes sense for non-negative data
    assert np.all(a >= 0.0)
    return np.r_[a, -a]

def doubled(dataset):
    """Return the 1d sequence `dataset` concatenated with itself."""
    return np.concatenate([dataset, dataset])
In [12]:
m30 = data["d"] <= 30.0
with sns.axes_style("white"):
    sns.jointplot(mirrored(data["d"][m30]), mirrored(data["r14"][m30]), 
                  kind="hex", size=9, xlim=(0, 30), ylim=(0, 30))
In [13]:
sns.kdeplot(mirrored(data["d"]), mirrored(data["r14"]), 
            clip=((0, 40), (0, 30)),
            bw=3.0, shade=True, cmap="Reds",
            )
None
In [14]:
color = "#774499"
g = sns.JointGrid(mirrored(data["d"][m30]), mirrored(data["r14"][m30]), size=9, xlim=(0, 30), ylim=(0, 30))
# g.plot(sns.regplot, sns.distplot, stats.pearsonr);
g.plot_marginals(sns.distplot, color=color, bins=10, rug=True, kde_kws=dict(bw=5.0, label="KDE"))
g.plot_joint(sns.regplot, color=color, ci=98, order=3, label="Cubic regression fit with 98% confidence bounds")
g.ax_joint.legend(loc="upper left")
g.ax_joint.set_xlabel("Projected separation, $D'$")
g.ax_joint.set_ylabel("Proplyd radius $r_0$, $10^{14}$ cm")
g.ax_marg_x.legend(loc="upper right")
g.ax_marg_y.legend(loc="best")
g.fig.tight_layout()
g.fig.savefig("proplyd-joint-separation-size.pdf");
#g.annotate(stats.pearsonr);

Now we will compare the sizes for different intervals of $D'$

In [15]:
datalist = [data['r14'][m1], data['r14'][m2], data['r14'][m3]]
mirrored_datalist = [mirrored(x) for x in datalist]
namelist = ['close: D\' < 7.5"', 'mid: 7.5" < D\' < 18.5"', 'far: D\' > 18.5"']

The simplest option is boxplot, which shows the median and quartiles. By setting whis=np.inf we make the whiskers extend out to the full range of the data.

In [16]:
sns.boxplot(datalist, names=namelist, whis=np.inf, color='pastel')
plt.ylabel("Proplyd size, 1e14 cm")
plt.xlabel("Projected distance band")
None

Alternatively we can use violinplot, which shows the Kernel Density Estimate (KDE) of the values in each category. By setting inner="points" it shows the actual data points instead of the quartiles.

I am a little uncomfortable with this way of presenting the data, since we are forcing the distance to be categorical, when really it is continuous.

In [17]:
sns.violinplot(mirrored_datalist, 
               names=namelist,
               inner="points", bw=0.3, color="PuBuGn")
plt.ylim(0.0, None)
plt.ylabel("Proplyd size, 1e14 cm")
plt.xlabel("Projected distance band")
None

Ricci et al (2008) sample

In [18]:
rdata = Table.read("Proplyd-Datasets/ricci-2008.dat", format='ascii')
rdata.remove_row(0) # First row is rubbish
rdata
Out[18]:
NameRAJ2000DEJ2000[OW94]TypeNote
4364-14605 34 36.44-05 21 45.950jJ
4466-32405 34 46.59-05 23 24.190jJ,B
4468-60505 34 46.76-05 26 04.790iJ
4538-31105 34 53.79-05 23 10.730rn0
4596-40005 34 59.56-05 24 00.194596-400i0
4582-63505 34 58.16-05 26 35.130i0
005-51405 35 00.47-05 25 14.34005-514i0
006-43905 35 00.58-05 24 38.790jJ
016-14905 35 01.60-05 21 49.350rn0
038-62705 35 04.19-05 26 27.89038-627i0
044-52705 35 04.42-05 25 27.40044-527i0
..................
304-53905 35 30.41-05 25 38.63304-539i0
307-180705 35 30.70-05 18 07.24307-1807i0
314-81605 35 31.40-05 28 16.480i0
321-60205 35 32.10-05 26 01.94321-602dEO
332-40505 35 33.19-05 24 04.740d0
332-160505 35 33.20-05 16 05.38332-1605i0
346-155305 35 34.62-05 15 52.920dFO
347-153505 35 34.67-05 15 34.88347-1535dJ
351-334905 35 35.13-05 33 49.180iJ
353-13005 35 35.32-05 21 29.590jJ,B
473-24505 35 47.34-05 22 44.820dEO

Bright proplyds have i in the Type column.

In [19]:
m = rdata['Type'] == 'i'
md = rdata['Type'] == 'd'
mj = rdata['Type'] == 'j'
mr = rdata['Type'] == 'rn'
In [20]:
rdata[m]
Out[20]:
NameRAJ2000DEJ2000[OW94]TypeNote
4468-60505 34 46.76-05 26 04.790iJ
4596-40005 34 59.56-05 24 00.194596-400i0
4582-63505 34 58.16-05 26 35.130i0
005-51405 35 00.47-05 25 14.34005-514i0
038-62705 35 04.19-05 26 27.89038-627i0
044-52705 35 04.42-05 25 27.40044-527i0
046-24505 35 04.63-05 22 44.850i0
049-14305 35 04.94-05 21 42.990i0
057-41905 35 05.73-05 24 18.55057-419i0
061-40105 35 06.09-05 24 00.60061-401i0
064-333505 35 06.44-05 33 35.250i0
..................
266-55805 35 26.62-05 25 57.840i0
280-93105 35 27.96-05 29 31.150i0
282-45805 35 28.20-05 24 58.19282-458iJ
282-61405 35 28.20-05 26 14.200i0
284-43905 35 28.40-05 24 38.690i0
297-02505 35 29.67-05 30 24.750i0
304-53905 35 30.41-05 25 38.63304-539i0
307-180705 35 30.70-05 18 07.24307-1807i0
314-81605 35 31.40-05 28 16.480i0
332-160505 35 33.20-05 16 05.38332-1605i0
351-334905 35 35.13-05 33 49.180iJ
In [21]:
print("Number of bright proplyds:", len(rdata[m]))
Number of bright proplyds: 178
In [22]:
from astropy import coordinates as coord
import astropy.units as u

Try out the new coordinates interface.

In [23]:
c = coord.SkyCoord(ra="05 35 33.20", dec="-05 16 05.38", unit=("hourangle", "deg"))
c
Out[23]:
<SkyCoord (ICRS): ra=83.88833333333332 deg, dec=-5.268161111111111 deg>

Add a column with an astropy SkyCoord instance for each object

In [24]:
rdata["coord"] = coord.SkyCoord(ra=rdata["RAJ2000"], dec=rdata["DEJ2000"], unit=("hourangle", "deg"))

Find the coordinates of $\theta^1$ Ori C:

In [25]:
c0 = coord.get_icrs_coordinates("tet01 ori c")
c0
Out[25]:
<SkyCoord (ICRS): ra=83.818598977 deg, dec=-5.389680154 deg>
In [26]:
c0.separation(c).arcsec
Out[26]:
503.84327222379954

Add another column with the projected separation in arcsec:

In [27]:
rdata['Dprime'] = [c0.separation(c).arcsec for c in rdata["coord"]]

Make a cumulative histogram to compare with the HA98 sample:

In [28]:
rdata['Dprime'][m].max(), rdata['Dprime'].max()
Out[28]:
(685.54604284372397, 932.10782801757205)
In [29]:
rmax = 800.0
nbins = rmax//4
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=True, color='b', alpha=0.3, label='Ricci 2008')
_ = plt.hist(data["d"], bins=nbins, range=(0, rmax), cumulative=True, color='r', alpha=0.3, label='Henney & Arthur 1998')
_ = plt.hist(rdata["Dprime"][md], bins=nbins, range=(0, rmax), cumulative=True, color='y', alpha=0.7, label='Dark disks')
_ = plt.hist(rdata["Dprime"][mr], bins=nbins, range=(0, rmax), cumulative=True, color='g', alpha=0.7, label='Reflec nebs')
_ = plt.hist(rdata["Dprime"][mj], bins=nbins, range=(0, rmax), cumulative=True, color='m', alpha=0.7, label='Bare jets')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of proplyds with projected distance < D\'')
_ = plt.title('Cumulative distribution of proplyds vs distance: Both samples')
plt.legend(loc='upper left')
plt.gcf().set_size_inches(18, 8)
plt.gcf().savefig("proplyd-distance-distribution.pdf")

Try it again, but non-cumulative. If the cumulative distribution is linear in $D'$ then the non-cumulative one should be constant.

In [30]:
rmax = 800.0
nbins = rmax//20
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=False, color='b', alpha=0.3, label='Ricci 2008')
_ = plt.hist(data["d"], bins=nbins, range=(0, rmax), cumulative=False, color='r', alpha=0.3, label='Henney & Arthur 1998')
_ = plt.hist(rdata["Dprime"][md], bins=nbins, range=(0, rmax), cumulative=False, color='y', alpha=0.7, label='Dark disks')
_ = plt.hist(rdata["Dprime"][mr], bins=nbins, range=(0, rmax), cumulative=False, color='g', alpha=0.7, label='Reflec nebs')
_ = plt.hist(rdata["Dprime"][mj], bins=nbins, range=(0, rmax), cumulative=False, color='m', alpha=0.7, label='Bare jets')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of proplyds with projected distance < D\'')
_ = plt.title('Distribution of proplyds vs distance: Both samples')
plt.legend(loc='upper right')
plt.gcf().set_size_inches(12, 6)

The ultimate tool for plotting a distribution is distplot. This can show the following if we turn on all the options:

  1. A rugplot of the individual data points
  2. A histogram of the distribution
  3. The KDE of the distribution
  4. A fit to the distribution of any function from scipy.stats
In [31]:
sns.distplot(rdata['Dprime'][m], 
             rug=True, 
             fit=stats.expon, 
             kde_kws={"clip": (0.0, 800.0), "kernel": "gau", "label": "Gaussian KDE"}, 
             rug_kws={"alpha": 0.3},
             fit_kws={"label": "Exponential fit"}, 
             bins=800//25,
             label="Histogram (25 arcsec bins)", axlabel=r"Projected distance, $D'$")
plt.legend()
plt.xlim(0.0, 800.0)
plt.title("Distribution of projected distances for all bright proplyds from Ricci et al. (2008) sample")
None

What I don't like about this is that the KDE is affected by the fact that there are no points with $D' < 0$. That is why it drops towards zero. To fix this, we could reflect the dataset about zero and add a negative mirrored dataset.

Note that stats.laplace is actually a 2-sided exponential. And it doesn't fit very well. On the other hand, the KDE now looks much nicer.

In [32]:
sns.distplot(np.r_[rdata['Dprime'][m], -rdata['Dprime'][m]], 
             rug=True, 
             fit=stats.logistic, 
             kde_kws={"kernel": "gau", "label": "KDE"}, 
             rug_kws={"alpha": 0.3},
             fit_kws={"label": "Sech-squared fit"}, 
             bins=2*800//25,
             label="Histogram (25 arcsec bins)", axlabel=r"Projected distance, $D'$")
plt.legend()
plt.xlim(0.0, 800.0)
plt.title("Distribution of projected distances for all bright proplyds from Ricci et al. (2008) sample")
None

So now, we will try the same trick on the joint distribution of $D'$ and $r_0$. This is in a previous section.

Zooming in on the central part, which is relevant to the bowshocks.

In [33]:
Dmax = 110.0
Dmaxx = 800.0
mzoom = rdata['Dprime'] < Dmax
dataset = mirrored(np.array(rdata['Dprime'][m & mzoom]))
sns.distplot(dataset,
             rug=True, 
             fit=stats.uniform, 
             kde_kws={"kernel": "gau", "label": "Gaussian KDE"}, 
             rug_kws={"alpha": 0.3},
             fit_kws={"label": "Constant fit"},
             hist_kws={"range": (-Dmaxx, Dmaxx)},
             bins=2*Dmaxx//10,
             label="Histogram (10 arcsec bins)", axlabel=r"Projected distance, $D'$")
plt.legend(loc="upper left")
plt.xlim(0.0, 100.0)
plt.title("Distribution of projected distances for all bright proplyds from Ricci et al. (2008) sample");

So this suggests that at scales $< 100''$ the proplyd distribution is uniform (implying density $\sim D^{-2}$).

Whereas at larger scales it does fall off.

What about the stars?

In [34]:
sdata = Table.read("Proplyd-Datasets/Robberto2013/table5.dat", format='ascii')
In [35]:
sdata.colnames
Out[35]:
['Seq',
 'oncacs',
 'RAh',
 'RAm',
 'RAs',
 'DE-',
 'DEd',
 'DEm',
 'DEs',
 'visit',
 'CCD',
 'x435',
 'x555',
 'x658',
 'x775',
 'x850',
 'y435',
 'y555',
 'y658',
 'y775',
 'y850',
 'F435mag',
 'e_F435mag',
 'f_F435mag',
 'F555mag',
 'e_F555mag',
 'f_F555mag',
 'F658mag',
 'e_F658mag',
 'f_F658mag',
 'F775mag',
 'e_F775mag',
 'f_F775mag',
 'F850mag',
 'e_F850mag',
 'f_F850mag',
 'rad435',
 'rad555',
 'rad658',
 'rad775',
 'rad850',
 'csky435',
 'csky555',
 'csky658',
 'csky775',
 'csky850',
 'e_csky435',
 'e_csky555',
 'e_csky658',
 'e_csky775',
 'e_csky850',
 'isky435',
 'isky555',
 'isky658',
 'isky775',
 'isky850',
 'osky435',
 'osky555',
 'osky658',
 'osky775',
 'osky850',
 'max435',
 'max555',
 'max658',
 'max775',
 'max850',
 'pixsat435',
 'pixsat555',
 'pixsat658',
 'pixsat775',
 'pixsat850',
 'radsat435',
 'radsat555',
 'radsat658',
 'radsat775',
 'radsat850',
 'type',
 'strip',
 'xstrip',
 'ystrip',
 'F435smag',
 'F555smag',
 'F658smag',
 'F775smag',
 'F850smag',
 'e_F435smag',
 'e_F555smag',
 'e_F658smag',
 'e_F775smag',
 'e_F850smag',
 'rad435s',
 'rad555s',
 'rad658s',
 'rad775s',
 'rad850s',
 'sky435s',
 'sky555s',
 'sky658s',
 'sky775s',
 'sky850s',
 'dsky435s',
 'dksy555s',
 'dsky658s',
 'dksy775s',
 'dsky850s',
 'Date',
 'time']

We first need to restrict the table to unique sources, since it contains multiple observations of the same source. We can do this by comparing the first two columns:

In [36]:
m_uniq = sdata['Seq'] == sdata['oncacs']

Then winnowing down the table:

In [37]:
sdata = sdata[m_uniq]

Description of the type column from notes to Table 5:

Source type, from visual inspection: (0) not measured; (1) detected in at least one filter and unresolved; (2) double (companion closer than ≈3 pixels, one entry for both sources); (3) wide double (companion further than ≈3 pixel, one entry for each source); (5) silhouette disk; (6) photoionized (proplyd or with other evidence of photoionization); (7) galaxy; (8) Herbig-Haro.
In [38]:
source_types = ['not measured', 'single', 'close double', 'wide double', 
                '???', 'disk', 'ionized', 'galaxy', 'herbig haro']
count_table = Table(names=['Code', 'Type', 'Number'], 
                    dtype=[int, 'S12', int])
for itype, label in enumerate(source_types):
    count_table.add_row([itype, label, np.sum(sdata['type'] == itype)])
count_table
Out[38]:
CodeTypeNumber
0not measured2
1single2912
2close double47
3wide double154
4???0
5disk36
6ionized184
7galaxy59
8herbig haro5

The ionized type is "proplyd or with other evidence of photoionization" and there are 184 of them, compared with 178 in the Ricci catalog. This is close enough for me.

Next job is to winnow out types 0, 7, and 8, which we don't want.

In [39]:
sdata = sdata[(sdata['type'] > 0) & (sdata['type'] < 7)]

And now create a mask to select out the ionized sources (mainly proplyds).

In [40]:
m_rob_ionized = sdata['type'] == 6

Now we combine the 6 columns used for coordinates into a single RA-Dec string:

In [41]:
sdata["coordstring"] = ["{} {} {} -{} {} {}".format(*args)
                  for args in zip(*[sdata[col] for col in ["RAh", "RAm", "RAs", "DEd", "DEm", "DEs"]])]
In [42]:
sdata["coordstring"]
Out[42]:
<MaskedColumn name='coordstring' unit=None format=None description=None>
masked_BaseColumn(data = ['5 34 9.889 -5 22 26.26' '5 34 10.25 -5 21 52.29'
 '5 34 11.111 -5 22 54.46' ..., '5 35 52.472 -5 38 7.85'
 '5 35 53.093 -5 30 10.38' '5 35 53.504 -5 39 32.88'],
                  mask = [False False False ..., False False False],
            fill_value = N/A)

Then that string can be used to initialize an astropy.coordinates.SkyCoord instance for each source.

Warning: The following cell takes a couple of minutes to run.

In [43]:
sdata["coord"] = coord.SkyCoord(sdata["coordstring"], unit=("hourangle", "deg"))
In [44]:
sdata["coord"]
Out[44]:
<MaskedColumn name='coord' unit=None format=None description=None>
masked_BaseColumn(data = [<SkyCoord (ICRS): ra=83.54120416666666 deg, dec=-5.373961111111111 deg>
 <SkyCoord (ICRS): ra=83.54270833333332 deg, dec=-5.3645249999999995 deg>
 <SkyCoord (ICRS): ra=83.54629583333332 deg, dec=-5.381794444444444 deg>
 ...,
 <SkyCoord (ICRS): ra=83.96863333333332 deg, dec=-5.635513888888888 deg>
 <SkyCoord (ICRS): ra=83.97122083333332 deg, dec=-5.502883333333333 deg>
 <SkyCoord (ICRS): ra=83.97293333333332 deg, dec=-5.659133333333334 deg>],
                  mask = [False False False ..., False False False],
            fill_value = ?)

And that in turn can be used to find the separation of each source from $\theta^1$ C.

In [45]:
sdata['Dprime'] = [c0.separation(c).arcsec for c in sdata["coord"]]
In [46]:
len(sdata['Dprime'])
Out[46]:
3333
In [47]:
rmax = 1200.0
nbins = rmax//4
_ = plt.hist(sdata["Dprime"], bins=nbins, range=(0, rmax), cumulative=True, color='b', alpha=0.3, label='Robberto 2013')
_ = plt.hist(sdata["Dprime"][m_rob_ionized], bins=nbins, range=(0, rmax), cumulative=True, color='r', alpha=0.3, label='Ionized, Robberto 2013')
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=True, color='y', alpha=0.7, label='Proplyds, Ricci 2008')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of stars with projected distance < D\'')
_ = plt.title('Cumulative distribution of stars vs distance: Robberto ACS sample')
plt.legend(loc='upper left')
plt.gcf().set_size_inches(18, 8)
In [48]:
rmax = 1200.0
binsize = 25
nbins = rmax//binsize
_ = plt.hist(sdata["Dprime"], bins=nbins, range=(0, rmax), cumulative=False, color='b', alpha=0.3, label='Robberto 2013')
_ = plt.hist(sdata["Dprime"][m_rob_ionized], bins=nbins, range=(0, rmax), cumulative=False, color='r', alpha=0.3, label='Ionized, Robberto 2013')
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=False, color='y', alpha=0.7, label='Proplyds, Ricci 2008')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of stars with projected distance D\' (25 arcsec bins)')
_ = plt.title('Distribution of stars vs distance: Robberto ACS sample')
plt.legend(loc='upper right')
plt.gcf().set_size_inches(18, 8)
In [49]:
rmax = 1200.0
sns.distplot(mirrored(sdata['Dprime']), 
             rug=True, 
             fit=stats.semicircular, 
             kde_kws={"kernel": "gau", "label": "KDE"}, 
             rug_kws={"alpha": 0.03},
             fit_kws={"label": "Semi-circular fit"}, 
             bins=2*rmax//25,
             label="Histogram (25 arcsec bins)", axlabel=r"Projected distance, $D'$")
plt.legend()
plt.xlim(0.0, 1200.0)
plt.title("Distribution of projected distances for all stars from Robberto et al. (2013) sample")
None

Zoom in on inner 200''

In [50]:
rmax = 200.0
nbins = rmax
_ = plt.hist(sdata["Dprime"], bins=nbins, range=(0, rmax), cumulative=True, color='b', alpha=0.3, label='Robberto 2013')
_ = plt.hist(sdata["Dprime"][m_rob_ionized], bins=nbins, range=(0, rmax), cumulative=True, color='r', alpha=0.3, label='Ionized, Robberto 2013')
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=True, color='y', alpha=0.7, label='Proplyds, Ricci 2008')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of stars with projected distance < D\'')
_ = plt.title('Cumulative distribution of stars vs distance: Robberto ACS sample')
plt.legend(loc='upper left')
plt.gcf().set_size_inches(18, 8)

Zoom in on inner 30''

In [51]:
rmax = 30.0
nbins = rmax
_ = plt.hist(sdata["Dprime"], bins=nbins, range=(0, rmax), cumulative=True, color='b', alpha=0.3, label='Robberto 2013')
_ = plt.hist(sdata["Dprime"][m_rob_ionized], bins=nbins, range=(0, rmax), cumulative=True, color='r', alpha=0.3, label='Ionized, Robberto 2013')
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=True, color='y', alpha=0.7, label='Proplyds, Ricci 2008')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of stars with projected distance < D\'')
_ = plt.title('Cumulative distribution of stars vs distance: Robberto ACS sample')
plt.legend(loc='upper left')
plt.gcf().set_size_inches(18, 8)

Maps of different source samples

In [52]:
rob_ra = [c.ra.deg for c in sdata['coord']]
rob_dec = [c.dec.deg for c in sdata['coord']]
In [53]:
rob_ra = np.array(rob_ra)
rob_dec = np.array(rob_dec)
In [54]:
ricci_ra = np.array([c.ra.deg for c in rdata['coord']])
ricci_dec = np.array([c.dec.deg for c in rdata['coord']])
In [55]:
import aplpy
import os
In [56]:
from imp import reload
In [57]:
aplpy.__version__
Out[57]:
'0.9.13.dev319'
In [58]:
hstdir = os.path.expanduser("~/Dropbox/JorgeBowshocks/HST")
In [59]:
fig = aplpy.FITSFigure(os.path.join(hstdir, "mosaicf656-fixw-align.fits"))
fig.show_grayscale(vmin=0.0, vmax=5.e3)
WARNING: FITSFixedWarning: RADECSYS= 'LINEAR ' 
RADECSYS is non-standard, use RADESYSa. [astropy.wcs.wcs]
WARNING:astropy:FITSFixedWarning: RADECSYS= 'LINEAR ' 
RADECSYS is non-standard, use RADESYSa.
WARNING: FITSFixedWarning: 'datfix' made the change 'Changed '21/03/95          ' to '1995-03-21''. [astropy.wcs.wcs]
WARNING:astropy:FITSFixedWarning: 'datfix' made the change 'Changed '21/03/95          ' to '1995-03-21''.
WARNING: Cannot determine equinox. Assuming J2000. [aplpy.wcs_util]
WARNING:astropy:Cannot determine equinox. Assuming J2000.
WARNING: Cannot determine equinox. Assuming J2000. [aplpy.wcs_util]
WARNING:astropy:Cannot determine equinox. Assuming J2000.
In [60]:
fig._figure.set_size_inches(18, 18)
fig.show_markers(ricci_ra[m], ricci_dec[m], layer='Ricci proplyds', edgecolor='c', lw=3, alpha=0.8)
fig.show_markers(rob_ra[m_rob_ionized], rob_dec[m_rob_ionized], layer='Robb proplyds', edgecolor='orange')
fig.show_markers(rob_ra[~m_rob_ionized], rob_dec[~m_rob_ionized], layer='Robb non-proplyds', edgecolor='y', marker='*')
fig.recenter(c0.ra.deg, c0.dec.deg, radius=60/3600)
ax = fig._figure.axes[0]
ax.set_title('A title')
fig._figure
Out[60]:

So the conclusion of this section is that the Ricci proplyd catalog (shown by blue circles on the image above) is more reliable than the Robberto ionized catalog (shown by orange circles) for the close-in proplyds. Robberto is missing a lot of obvious proplyds, and furthermore has a few sources that don't seem to correspond to anything, although some might be bowshocks or jet knots.

The Ricci catalog is only missing a very few, such as the one very close to th1c.

Monte Carlo simulations

Assume power law distribution in radius.

In [61]:
class SourcePopulation(object):
    """Monte Carlo population of sources drawn from a power-law radial density.

    Sources are distributed with number density ~ D^-m between radii
    `Dmin` and `Dmax`, with isotropic random orientations.  Projected
    quantities (`Dprime`, `RA`, `Dec`) assume the line of sight is the
    polar axis; `RA`/`Dec` are plane-of-sky offsets in the same units
    as `Dmax` (not true celestial coordinates).
    """

    def __init__(self, N, Dmin=0.0, Dmax=800.0, m=2.6):
        """Return `N` samples of sources drawn from a radial distribution with density ~ D^-m
        Maximum and minimum radii can be specified with `Dmin` and `Dmax`. 
        If `Dmin = 0` then `m < 3` is required."""
        # The inverse-CDF sampling below uses the exponent 3 - m, so the
        # logarithmic case m == 3 is never allowed.  m < 3 is only needed
        # when Dmin == 0 (otherwise the enclosed number diverges at D -> 0);
        # for Dmin > 0 any other m is valid.  The original code asserted
        # m < 3 unconditionally, contradicting its own docstring.
        assert m != 3.0, "m = 3 requires a logarithmic CDF; not supported"
        assert m < 3 or Dmin > 0.0, "m >= 3 requires Dmin > 0"

        self.N = N
        self.m = m
        self.Dmin = Dmin
        self.Dmax = Dmax
        
        # Radial distances from center, via inverse transform sampling of
        # the cumulative distribution P(<D) ~ (D^(3-m) - Dmin^(3-m))
        xi = np.random.random_sample((N,))
        x0 = Dmin/Dmax
        self.D = Dmax*((1 - x0**(3.0 - m))*xi + x0**(3.0 - m))**(1.0/(3.0 - m))
        # Angle cosine with LOS: uniform in mu gives isotropic directions
        xi = np.random.random_sample((N,))
        self.mu = 2*xi - 1.0
        # Position angle, uniform in [0, 360) degrees
        xi = np.random.random_sample((N,))
        self.PA = 360.0*xi

        # Inclination to plane of sky inc = [-90:90]
        self.inc = np.degrees(np.arcsin(self.mu))
        
        # Projected distance on the plane of the sky
        self.Dprime = self.D*np.sqrt(1.0 - self.mu**2)
        
        # Plane-of-sky coords (offsets from center, PA measured from +Dec axis)
        self.Dec = self.Dprime*np.cos(np.radians(self.PA))
        self.RA = self.Dprime*np.sin(np.radians(self.PA))
In [62]:
p = SourcePopulation(150, m=2.1, Dmin=5.0, Dmax=250.0)
p2 = SourcePopulation(30, m=2.5, Dmin=150.0, Dmax=800.0)
Dprimes = np.r_[p.Dprime, p2.Dprime]
In [63]:
rmax = 800.0
nbins = rmax//4
_ = plt.hist(rdata["Dprime"][m], bins=nbins, range=(0, rmax), cumulative=True, color='b', alpha=0.3, label='Ricci 2008')
_ = plt.hist(Dprimes, bins=nbins, range=(0, rmax), cumulative=True, color='r', alpha=0.3, label='Fake sample')
# _ = plt.hist(p.Dprime, bins=nbins, range=(0, rmax), cumulative=True, color='g', alpha=0.3, label='p')
# _ = plt.hist(p2.Dprime, bins=nbins, range=(0, rmax), cumulative=True, color='y', alpha=0.8, label='p2')
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of proplyds with projected distance < D\'')
_ = plt.title('Cumulative distribution of proplyds vs distance: Both samples')
plt.legend(loc='upper left')
plt.gcf().set_size_inches(18, 8)
In [64]:
import seaborn
In [65]:
cmap = seaborn.palettes.dark_palette('red', as_cmap=True)
In [66]:
rmax = 800.0
nbins = rmax//25
datasets = [rdata["Dprime"][m], Dprimes, p.Dprime, p2.Dprime]
colors = ['b', 'y', 'g', 'y']
labels = ['Ricci 2008', 'Fake sample', 'p', 'p2']
_ = plt.hist(datasets, bins=nbins, range=(0, rmax), cumulative=False, color=None, label=labels)
_ = plt.xlabel('Projected distance D\', arcsec')
_ = plt.ylabel('# of proplyds with projected distance = D\' (bin width 25 arcsec)')
_ = plt.title('Distribution of proplyds vs distance: Both samples')
plt.legend(loc='upper right')
plt.gcf().set_size_inches(18, 8)
In [67]:
Dmax = 110.0
Dmaxx = 800.0
mzoom = Dprimes < Dmax
dataset = Dprimes[mzoom]
sns.distplot(dataset,
             rug=True, 
             fit=stats.uniform, 
             kde_kws={"kernel": "gau", "label": "Gaussian KDE"}, 
             rug_kws={"alpha": 0.3},
             fit_kws={"label": "Constant fit"},
             hist_kws={"range": (0, Dmaxx)},
             bins=Dmaxx//10,
             label="Histogram (10 arcsec bins)", axlabel=r"Projected distance, $D'$")
plt.legend(loc="upper left")
plt.xlim(0.0, 100.0)
plt.title("Distribution of projected distances from Monta Carlo fake sample");

Compare the projected separation and the inclination for the inner 30 arcsec of the MC sample

Assume that the inner wind shock lies at about $D = 26''$. Only proplyds that are physically closer than this will have Type I bowshocks. These are shown by orange points in the figure. Physically more distant proplyds are shown by gray points. We also show contours of physical separation every $10''$ from $D = 10''$ to $D = 200''$.

In [68]:
color = ".5"
m30 = p.Dprime <= 30.0
Dmax = 26.0
mwind = p.D <= Dmax
g = sns.JointGrid(p.Dprime[m30], p.inc[m30], size=9, xlim=(0, 30), ylim=(-90, 90))
g.plot_marginals(sns.distplot, color=color, rug=True, 
                 kde_kws=dict(label="KDE", bw="silverman", cut=0))
#g.plot_joint(sns.regplot, color=color, fit_reg=False, scatter_kws=dict(s=50))
g.ax_joint.scatter(p.Dprime[mwind], p.inc[mwind], marker='o', c="orange", s=50, label="D < {}".format(Dmax))
g.ax_joint.scatter(p.Dprime[~mwind], p.inc[~mwind], marker='o', c=color, s=50, label="D > {}".format(Dmax))
inc_pts = np.linspace(-90.0, 90.0)
Dprime_pts = Dmax*np.cos(np.radians(inc_pts))
g.ax_joint.plot(Dprime_pts, inc_pts, '-')
for D in np.arange(10, 200, 10):
    g.ax_joint.plot(Dprime_pts*D/Dmax, inc_pts, '-', color='k', lw=0.5, alpha=0.3)
g.ax_joint.legend(loc="upper center", frameon=True, shadow=True)
g.ax_joint.set_xlabel("Projected separation, $D'$")
g.ax_joint.set_ylabel("Inclination, degrees")
g.ax_marg_x.legend(loc="upper right")
g.ax_marg_y.legend(loc="best")
g.fig.tight_layout()
#g.fig.savefig("proplyd-joint-separation-size.pdf");

Distribution of sizes

Read in the size table that I converted from the DS9 regions.

In [69]:
sizedata = Table.read("Proplyd-Datasets/proplyd-chords-D60.dat", format='ascii.commented_header')
sizedata[:4]
Out[69]:
IDRA1DEC1RA2DEC2
154-2405:35:15.394-5:22:39.615:35:15.354-5:22:39.71
163-2495:35:16.340-5:22:48.795:35:16.324-5:22:48.80
165-235S5:35:16.487-5:22:35.295:35:16.458-5:22:35.22
165-235N5:35:16.488-5:22:34.975:35:16.479-5:22:34.97

Create coord.SkyCoord objects for the two ends of the chord that I measured at the back of each proplyd's head. Then use those to find the chord diameter in arcsec (Chord column), and then convert to a radius in units of $10^{14}$ cm (r14 column).

In [70]:
coords1 = coord.SkyCoord(ra=sizedata["RA1"], dec=sizedata["DEC1"], unit=("hourangle", "deg"))
coords2 = coord.SkyCoord(ra=sizedata["RA2"], dec=sizedata["DEC2"], unit=("hourangle", "deg"))
sizedata["Chord"] = [c1.separation(c2).arcsec for c1, c2 in zip(coords1, coords2)]
sizedata["r14"] = 0.5 * sizedata["Chord"] * 430*u.AU.in_units(u.cm)/1.e14
sizedata[:4]
Out[70]:
IDRA1DEC1RA2DEC2Chordr14
154-2405:35:15.394-5:22:39.615:35:15.354-5:22:39.710.60567146710619.4805397919
163-2495:35:16.340-5:22:48.795:35:16.324-5:22:48.800.2391518179047.69196958749
165-235S5:35:16.487-5:22:35.295:35:16.458-5:22:35.220.43870683708414.1103658682
165-235N5:35:16.488-5:22:34.975:35:16.479-5:22:34.970.1344060910844.32297598258

Take the average of the RA and DEC components of the two ends of the chord to get an average coordinate (coords0) for each source. Note: This will not quite be the center of the proplyd, but it is close enough.

In [71]:
coords0 = coord.SkyCoord(ra=0.5*(coords1.ra + coords2.ra), dec=0.5*(coords1.dec + coords2.dec))
coords0[:4]
Out[71]:
<SkyCoord (ICRS): (ra, dec) in deg
    [(83.81405833333332, -5.377683333333333),
     (83.81804999999999, -5.380220833333333),
     (83.81863541666667, -5.376459722222222),
     (83.81868124999998, -5.3763805555555555)]>

Remove the columns with the chord end coordinates because we don't need them any more.

In [72]:
sizedata.remove_columns(["RA1", "RA2", "DEC1", "DEC2"])
sizedata[:4]
Out[72]:
IDChordr14
154-2400.60567146710619.4805397919
163-2490.2391518179047.69196958749
165-235S0.43870683708414.1103658682
165-235N0.1344060910844.32297598258

Add some more columns:

  • RA and Dec corresponding to coords0
  • Projected distance Dprime from th1C (in arcsec)
  • Position angle PA from th1C (in degrees).
In [73]:
sizedata['RA'] = coords0.ra.to_string(unit=u.hourangle, sep=':')
sizedata['Dec'] = coords0.dec.to_string(sep=':')
sizedata['Dprime'] = c0.separation(coords0).arcsec
sizedata['PA'] = c0.position_angle(coords0).deg
sizedata[-4:]
Out[73]:
IDChordr14RADecDprimePA
163-3230.1011089261723.252021213775:35:16.3125-5:23:22.62.27242083601276.279471062
153-3210.08000000000592.573083376235:35:15.342-5:23:21.3116.8224369079275.247298049
167-2310.32854879080510.5672928985:35:16.728-5:22:31.1851.81903175474.36755966222
171-3340.2094863360216.737822609055:35:17.0625-5:23:34.11514.3834063732141.563347547

Now we try and merge in the HA98 table.

In [74]:
from astropy.table import join, Column

First naive attempt was a straight outer join on the ID column. This gave some objects that are in the HA98 table but not in the new data. Of these, 244-440 is OK, since it is outside our distance cut-off, but the others need fixing. 177-341 (not in Ricci catalog) is just a matter of the name, which should be 177-341W. There were also two really missing: 171-334 and 154-324 (my typo).

So we need to fix things up. First make the ID field in the HA98 table wider and fix the name of 177-341W.

This took much longer than it should have done. It turns out that you can't change the dtype of a column without making a new table.

In [75]:
data = Table(data, names=data.colnames, dtype=['<U12', '<f8', '<f8', '<f8'])
data['ID'] = Column(data['ID'], dtype='<U12')
irow = np.argmax(data['ID'] == '177-341')
data['ID'][irow] = '177-341W'

So now we can do the join properly:

In [76]:
jdata = join(sizedata, data, join_type='outer', keys='ID')

And the only one missing is 244-440:

In [77]:
jdata[~jdata["r14_2"].mask & jdata["r14_1"].mask]
Out[77]:
IDChordr14_1RADecDprimePAdr14_2VicR14
244-440------------142.3104.0180.6

Now we tidy up some of the column names:

In [78]:
jdata.rename_column("r14_1", "r14")
jdata.rename_column("r14_2", "r14_HA")
jdata.rename_column("d", "Dprime_HA")

Look at the rows that are in both datasets:

In [79]:
overlap = ~jdata["r14"].mask & ~jdata["r14_HA"].mask
jdata[overlap]
Out[79]:
IDChordr14RADecDprimePADprime_HAr14_HAVicR14
152-3190.4021371270612.93415445665:35:15.197-5:23:18.6619.3754821445282.48440358319.1618.214.8
153-3210.08000000000592.573083376235:35:15.342-5:23:21.3116.8224369079275.24729804916.971.28.4
154-3240.09999999999593.216354219925:35:15.342-5:23:24.0616.795668529265.86353272216.633.210.3
155-3380.40262236469412.94976141775:35:15.505-5:23:37.45520.4534702584224.42783197920.4817.029.7
157-3230.1000000000023.21635422015:35:15.705-5:23:22.4511.3380074485272.01433163410.972.518.4
158-3230.170654665875.488858547425:35:15.8045-5:23:22.4559.8529615849272.2890310729.426.330.6
158-3260.2323259753567.472426312635:35:15.8285-5:23:25.549.86108732118254.1608255389.611.331.6
158-3270.4002366670412.87302893055:35:15.7755-5:23:26.58510.9362570123250.0220232810.616.635.5
159-3380.1915755186916.161747280015:35:15.886-5:23:38.0817.5053947123209.52961366117.25.013.5
159-3500.62592227142620.13187739125:35:15.9375-5:23:50.12528.3860230251196.0726060128.3520.144.5
161-3140.1860494671315.984009887465:35:16.095-5:23:14.05510.3755605918327.94349148110.245.317.1
..............................
167-3170.2495569195588.026634513635:35:16.7455-5:23:16.2457.8300719740232.50361387187.837.935.8
168-3280.1269980216774.08470622965:35:16.76-5:23:28.1556.90872052245140.1817864466.642.821.0
169-3380.09449034060543.03914405765:35:16.883-5:23:38.09516.4818823889157.67488234516.472.816.1
170-3370.41624042726513.38776654795:35:16.98-5:23:37.20516.2954814572151.76426977816.212.236.8
171-3340.2094863360216.737822609055:35:17.0625-5:23:34.11514.3834063732141.56334754714.294.722.9
171-3400.5462728212717.57006893995:35:17.0555-5:23:39.80519.120982071152.47369841119.1123.323.2
173-3410.1694140229225.448955075615:35:17.327-5:23:41.4222.6072175472145.23369658122.484.118.1
176-3250.2699999999978.684156394045:35:17.569-5:23:24.78516.618585154496.691687202416.386.923.5
177-341W0.60729734091719.53283365285:35:17.684-5:23:41.09525.7875745289135.03756966625.8420.451.6
180-3310.32619216590510.49149549365:35:18.057-5:23:30.74525.0691007756108.36035226325.1212.229.3
182-4130.99894955477932.12975616135:35:18.2155-5:24:13.15556.7015784227152.52558854456.736.948.7

Check that the two estimates of the proplyd sizes are not too different.

In [80]:
x, y = [np.log10(jdata[overlap][colname]) for colname in ['r14', 'r14_HA']]
g = sns.jointplot(x, y, size=8)
g.ax_joint.plot([0, 2], [0, 2], '-w', zorder=-100, lw=1)
g.ax_joint.set_xlim(0.0, 2.0)
g.ax_joint.set_ylim(0.0, 2.0);

So it doesn't look too bad. Here are the cases where HA98 got >30% larger sizes. These are mainly large sized.

In [81]:
jdata[overlap][jdata[overlap]['r14_HA'] > 1.3*jdata[overlap]['r14']]['ID', 'r14', 'r14_HA']
Out[81]:
IDr14r14_HA
152-31912.934154456618.2
155-33812.949761417717.0
158-3267.4724263126311.3
161-3286.418837484879.1
171-34017.570068939923.3

And here are the cases where HA98 sizes were more than 30% smaller. These are mainly small sized. In this case, I think that the current results are more reliable because in HA98 we did not remove the central stars and so overestimated the compactness of some of the small proplyds.

In [82]:
jdata[overlap][jdata[overlap]['r14'] > 1.3*jdata[overlap]['r14_HA']]['ID', 'r14', 'r14_HA']
Out[82]:
IDr14r14_HA
153-3212.573083376231.2
163-3233.252021213772.2
166-3163.727059189252.5
168-3284.08470622962.8
171-3346.737822609054.7
173-3415.448955075614.1

If we look at the distribution of ratios, then it seems we have agreement to a level of about 30%. This is not bad, given that the sizes range over an order of magnitude.

In [83]:
ratio = jdata['r14']/jdata['r14_HA']
sns.distplot(ratio[overlap], rug=True, fit=stats.norm, axlabel='r14 / r14_HA')
plt.title('Current vs HA98 estimates of proplyd size:'
          ' ratio = {:.3f} +/- {:.3f}'.format(ratio[overlap].mean(), 
                                                 ratio[overlap].std()))
Out[83]:
<matplotlib.text.Text at 0x110965fd0>

We can do the same thing for the separations. This shows a much tighter correlation, as one would expect

In [84]:
x, y = [np.log10(jdata[overlap][colname]) for colname in ['Dprime', 'Dprime_HA']]
g = sns.jointplot(x, y, size=8)
g.ax_joint.plot([0, 2], [0, 2], '-w', zorder=-100, lw=1)
g.ax_joint.set_xlim(0.0, 2.0)
g.ax_joint.set_ylim(0.0, 2.0);

And just for fun, here is the same plot for the Vicente & Alves (2005) sizes.

In [85]:
x, y = [np.log10(jdata[overlap][colname]) for colname in ['VicR14', 'r14']]
g = sns.jointplot(x, y, color='brown', size=8)
g.ax_joint.plot([0, 2], [0, 2], '-w', zorder=-100, lw=1)
g.ax_joint.set_xlim(0.0, 2.0)
g.ax_joint.set_ylim(0.0, 2.0);

Finally, we can go back to the size-distance relationship. First, the joint distribution as a scatter plot, with the marginal distributions shown on the top and right edges.

In [86]:
color = "#774499"
# g = sns.JointGrid(mirrored(jdata["Dprime"].filled(0.0)), doubled(jdata["r14"].filled(0.0)), 
g = sns.JointGrid(jdata["Dprime"], jdata["r14"], 
                  size=9, xlim=(0, 60), ylim=(0, 35))
# g.plot(sns.regplot, sns.distplot, stats.pearsonr);
g.plot_marginals(sns.distplot, color=color, bins=10, rug=True, kde=False)
g.plot_joint(sns.regplot, color=color, ci=99, order=2, label="Quadratic regression fit with 99% confidence bounds")
g.ax_joint.legend(loc="upper left")
g.ax_joint.set_xlabel("Projected separation, $D'$")
g.ax_joint.set_ylabel("Proplyd radius $r_0$, $10^{14}$ cm")
g.fig.tight_layout()
g.fig.savefig("proplyd-joint-separation-size-new.pdf");

Now, split up into coarse bins according to the separation, and then look at the distribution of sizes in each bin.

First, sort on separation (it turns out that sorting doesn't work for colums that are masked arrays, so we fill the missing values with NaNs first):

In [87]:
jjdata = jdata[:-1].filled(np.nan)
jjdata.sort('Dprime')

Now split into 5 equal parts and make a violin plot of each one:

In [88]:
nsplit = 5
Dprime_splits = np.array_split(jjdata['Dprime'], nsplit)
r14_splits = np.array_split(jjdata['r14']*1e14*u.cm.to(u.au), nsplit)
split_names = ['D\' = {:.1f} to {:.1f}"'.format(x.min(), x.max()) for x in Dprime_splits]
sns.violinplot(r14_splits, 
               names=split_names,
               inner="points", color="PuBuGn")
plt.ylim(0.0, None)
plt.ylabel("Proplyd size, AU")
plt.xlabel("Range of projected separations");

There are 14 (or 13) sources in each bin.

In [89]:
[len(a) for a in r14_splits]
Out[89]:
[14, 14, 14, 14, 13]

So it looks like there is no significant change in the small proplyds with $r < 10^{15}$ cm (60 AU). The larger proplyds are reduced in number in the inner 3 bins.

So, for a first stab at the size distribution, I will try and fit something sensible to the 27 sources with $D' > 35.5$. And then play with having a varying upper cut-off.

But first, we add a new column with radius in AU, and then save the whole table to a file so that we can use it later more easily.

In [90]:
jjdata['rAU'] = jjdata['r14']*(1.e14*u.cm).to(u.AU)
jjdata[:4]
Out[90]:
IDChordr14RADecDprimePADprime_HAr14_HAVicR14rAU
AU
163-3230.1011089261723.252021213775:35:16.3125-5:23:22.62.27242083601276.2794710622.142.219.421.7384191269
168-326W0.1005342149013.233536463555:35:16.8225-5:23:25.9856.20797063143120.346612506nannannan21.6148562036
161-3240.1011089244443.252021158195:35:16.0435-5:23:24.316.44385851073256.891366896.053.525.821.7384187554
168-326E0.2435727716847.834163120965:35:16.8425-5:23:26.366.6574227895121.83327501nannannan52.3681459121
In [91]:
jjdata.write("Proplyd-datasets/merged-proplyd-sizes.tab",
             format="ascii.tab",
             fill_values=[('nan', '--')])
In [92]:
# Compare the reverse-cumulative size distribution of all sources against a
# spatially selected subsample, and overlay an analytic model CDF.
outer = jjdata['Dprime'] > 30.0
# NOTE(review): the mask threshold is 30" but the legend label below says 35",
# and ~outer selects the INNER sources (D' <= 30") despite the 'D\' > 35"'
# label -- confirm which subsample and threshold were intended.
# NOTE: 'normed' is the pre-matplotlib-3 name of the 'density' keyword;
# cumulative=-1 gives the reverse (greater-than) cumulative histogram.
plt.hist([jjdata['rAU'], jjdata[~outer]['rAU']], 
         label=['All', 'D\' > 35"'],
         normed=True, cumulative=-1, bins=50)
plt.legend()
x = np.linspace(0.0, jjdata['rAU'].max())
# Model parameters: characteristic radius r0 (AU) and power-law index a
r0, a = 50.0, 1.1
# Upper cut-off radius (AU) at which the distribution is truncated
rmax = 150.0
# Fraction of the untruncated distribution that lies below rmax
b = 1.0 - 1.0/(1.0 + (rmax/r0)**(2*a))
# Truncated, renormalized survival function of CDF = 1 - 1/(1 + (r/r0)^{2a})
plt.plot(x, (1.0/(1.0 + (x/r0)**(2*a)) - (1.0 - b))/b, 'w')
# plt.plot(x, , 'w')
plt.xlabel('Proplyd radius, r, AU')
plt.ylabel('Fraction of sources smaller or equal to r')
# Bare None suppresses the cell's output repr
None

So it looks like a reasonable fit to the CDF is $1 - 1/(1 + (r/r_0)^{2a})$ with $r_0 = 55$ AU and $a = 1.3$.

Which means that the PDF is $\sim r^{2a - 1} / (1 + (r/r_0)^{2a})^2$.

In [93]:
# Histogram of proplyd sizes with the model PDF
# ~ r^{2a-1} / (1 + (r/r0)^{2a})^2 overlaid for comparison.
nbins = 15
# NOTE(review): `doubled` is a helper defined elsewhere in the notebook (not
# visible here) -- presumably it duplicates the sample to boost counts; verify.
# NOTE(review): `outer` selects D' > 30" (from the earlier cell) but the label
# says 35" -- confirm the intended threshold.
plt.hist([jjdata['rAU'], doubled(jjdata[outer]['rAU'])], 
         label=['All', 'D\' > 35"'],
         normed=False, bins=nbins)
plt.legend()
x = np.linspace(0.0, jjdata['rAU'].max())
# Model parameters: characteristic radius r0 (AU) and power-law index a
r0, a = 70.0, 1.1
# Model PDF scaled to histogram counts; the leading factor 5 is an ad hoc
# normalization chosen by eye -- TODO confirm
plt.plot(x, 5*(len(jjdata)/nbins)*2*a*(x/r0)**(2*a - 1)/(1.0 + (x/r0)**(2*a))**2, 'w')
# plt.plot(x, , 'w')
plt.xlabel('Proplyd radius, r, AU')
plt.ylabel('PDF');

So that looks sort of alright, although it might be said to peak at too large a value.

So we can do the random sampling from a uniform deviate $\xi$, where $r = r_0 (\xi^{-1} - 1)^{1/(2a)}$. That is assuming no cut-off. I changed the parameters in the end to $r_0 = 50$ AU and $a = 1.4$.

If there is a cut-off for $r > r_1$, then we can define $b = 1 - \left(1 + (r_1/r_0)^{2a}\right)^{-1}$ and then $r = r_0 \left((1 - b \xi)^{-1} - 1\right)^{1/(2a)}$.

In [186]:
r0, a = 70.0, 1.2
rmax = 300.0
b = 1.0 - 1.0/(1.0 + (rmax/r0)**(2*a))
print(b)
samples = [jjdata['rAU']]
names = ["proplyds"]
for i in range(4):
    xi = np.random.random_sample((len(jjdata),))
    samples.append(r0*((1.0/(1.0 - b*xi)) - 1.0)**(1./(2*a)))
    names.append('fake #'+str(i+1))
sns.violinplot(samples, names=names,
               inner="points", color="PuBuGn")
plt.ylim(0.0, 350.0)
plt.title("Proplyd size distribution: real versus simulated")
plt.ylabel("Proplyd size, AU")
plt.xlabel("Sample");
0.9704790420777791

That looks great. So now we can start worrying about the high-end cut-off and how it varies with separation.

In [196]:
r0, a = 50.0, 1.1
samples = []
names = []
for rmax in [100.0, 150.0, 250.0, 350.0, 350.0]:
    b = 1.0 - 1.0/(1.0 + (rmax/r0)**(2*a))
    xi = np.random.random_sample((14,))
    samples.append(r0*((1.0/(1.0 - b*xi)) - 1.0)**(1./(2*a)))
    names.append('fake, rmax = {:.0f}'.format(rmax))
sns.violinplot(samples, names=names,
               inner="points", color="PuBuGn")
plt.ylim(0.0, 350.0)
plt.title("Proplyd size distribution: simulated with different cut-offs rmax")
plt.ylabel("Proplyd size, AU")
plt.xlabel("Sample");

So now it looks like a shallower power law is necessary: $r_0 = 50$ AU, $a = 1.1$. Otherwise, there are not enough sources in the high tail of the size distributions and the cut-off doesn't get a chance to do anything.

Try adding these samples together, to see how well it fits the observed total distribution.

In [197]:
total_fake = np.hstack(samples)
sns.violinplot([jjdata['rAU'], total_fake], names=['proplyds', 'simulated'],
               inner="points", color="PuBuGn")
plt.ylim(0.0, 350.0)
plt.title("Proplyd size distribution: real vs simulated using 4 different rmax")
plt.ylabel("Proplyd size, AU")
plt.xlabel("Sample");

So that is sort of OK. But we really need to have the upper cut-off be a function of true separation $D$. Then we need to choose the $D$'s from a distribution, like we did earlier.

First of all, decide what is the functional form of $r_1$ versus $D$. We can check the values of $D$ for sources in our previous random sample p that have $D'$ values that lie in each of our Dprime_splits.

In [97]:
D_pts = []
for Dp in Dprime_splits:
    mask = (p.Dprime > Dp.min()) & (p.Dprime <= Dp.max())
    DD = p.D[mask]
    print(DD.min(), np.median(DD), DD.max())
    D_pts.append(np.median(DD))
7.89284785984 15.1878423727 74.9801281496
18.9056267699 48.7921340846 150.343129311
27.3190119168 33.9150523529 66.1168237478
38.5921285925 49.6720448361 241.653731105
50.7206264744 59.9915236596 143.653760588
In [98]:
rmax_pts = [100, 150, 250, 350, 350]
plt.plot(D_pts, rmax_pts, 'o')
D_ppts = np.r_[np.linspace(0.0, 0.99, 100), np.linspace(1.0, 100.0, 200)]
plt.plot(D_ppts, 40*np.sqrt(D_ppts))
plt.plot(D_ppts, 6*D_ppts)
plt.ylim(0.0, 400.0)
plt.xlabel('True separation, $D$, arcsec')
plt.ylabel('Upper cut-off on proplyd size, $r_1$, AU');

So it looks like $r_1 = 40 (D\,/\,'')^{1/2}$ AU would work well. So we will now try and put it all together.

In [99]:
class SourcePopulation(object):
    """Monte-Carlo population of proplyds around a central ionizing star.

    Draws `N` sources whose true separations `D` follow a volume number
    density ~ D^-m between `Dmin` and `Dmax`, with isotropic random
    orientations.  Each source also gets a radius `rAU` drawn from the size
    distribution with CDF(r) = 1 - 1/(1 + (r/r0)^(2a)), truncated at an
    upper cut-off that grows linearly with separation (rmax = 6 D).

    Attributes set on each instance:
      D       -- true separation (same units as Dmin/Dmax, nominally arcsec)
      mu      -- cosine of the angle between the separation vector and LOS
      inc     -- inclination to the plane of the sky, degrees in [-90, 90]
      PA      -- position angle on the sky, degrees in [0, 360)
      Dprime  -- projected separation, D * sqrt(1 - mu^2)
      RA, Dec -- plane-of-sky offsets corresponding to Dprime and PA
      rAU     -- proplyd radius in AU
    """

    def __init__(self, N, Dmin=0.0, Dmax=800.0, m=2.6, r0=50.0, a=1.1):
        """Return `N` samples of proplyds drawn from a radial distribution
        with density ~ D^-m.  Maximum and minimum radii can be specified with
        `Dmin` and `Dmax`.  `m < 3` is required only when `Dmin = 0` (the
        inverse-CDF sampling diverges at the origin otherwise); when
        `Dmin > 0`, any `m != 3` is allowed.  The proplyd size distribution
        is described by `r0` (AU) and the power-law index `a`."""
        # m == 3 gives a zero exponent (logarithmic case, not handled here)
        assert m != 3, "m = 3 (logarithmic case) is not supported"
        # With Dmin = 0 the sampling formula is only finite for m < 3
        assert Dmin > 0.0 or m < 3, "m < 3 is required when Dmin = 0"

        self.N = N
        self.m = m
        self.Dmin = Dmin
        self.Dmax = Dmax
        self.r0 = r0
        self.a = a

        # Radial distances from center: inverse-CDF sampling of the
        # cumulative number P(<D) ~ (D^(3-m) - Dmin^(3-m)) / (Dmax^(3-m) - Dmin^(3-m))
        xi = np.random.random_sample((N,))
        x0 = Dmin/Dmax
        expo = 3.0 - m
        x0e = x0**expo
        self.D = Dmax*((1 - x0e)*xi + x0e)**(1.0/expo)
        # Angle cosine with LOS: uniform in mu gives isotropic directions
        xi = np.random.random_sample((N,))
        self.mu = 2*xi - 1.0
        # Position angle: uniform over the full circle
        xi = np.random.random_sample((N,))
        self.PA = 360.0*xi

        # Inclination to plane of sky inc = [-90:90]
        self.inc = np.degrees(np.arcsin(self.mu))

        # Projected distance
        self.Dprime = self.D*np.sqrt(1.0 - self.mu**2)

        # Plane-of-sky coords
        self.Dec = self.Dprime*np.cos(np.radians(self.PA))
        self.RA = self.Dprime*np.sin(np.radians(self.PA))

        # Proplyd size in AU: upper cut-off rmax grows linearly with D
        # (a sqrt law, rmax = 40 sqrt(D), was tried earlier and rejected)
        rmax = 6*self.D
        # Fraction of the untruncated size distribution lying below rmax
        b = 1.0 - 1.0/(1.0 + (rmax/r0)**(2*a))
        xi = np.random.random_sample((N,))
        self.rAU = r0*((1.0/(1.0 - b*xi)) - 1.0)**(1./(2*a))
        
In [100]:
num60 = []
mega_datasets = []
for _ in range(30000):
    p = SourcePopulation(190, m=2.1, Dmin=5.0, Dmax=250.0)
    num60.append(np.sum(p.Dprime < 60.0))
    mega_datasets.append(p)

We play with the total number of proplyds until we get the right number of proplyds at projected distances less than $60''$, which should be 69. Looks like 190 is about right.

In [101]:
sns.distplot(num60, bins=20, kde=False, fit=stats.norm);
In [102]:
#istart = 1133
#istart = 7777
istart = 999

scatter_kws = dict(marker='o', alpha=1.0, edgecolors=None)
line_kws = dict(lw=2)
fig, axes = plt.subplots(5, 5, sharex=True, sharey=True)
proplyd_colors=sns.color_palette("RdPu", 5)
sns.regplot(jjdata['Dprime'], jjdata['rAU'], ax=axes[0, 0], order=2, ci=99,
            color=proplyd_colors[3], scatter_kws=scatter_kws, line_kws=line_kws)
axes[0, 0].text(3, 340, 'Orion proplyds, N = '+str(len(jjdata)))
axes[0, 0].set_xlabel('')
i = istart
for p, ax in zip(mega_datasets[istart:istart+24], axes.flat[1:]):
    m60 = p.Dprime <= 60.0
    color_kws = scatter_kws.copy()
    color_kws.update(c=np.abs(p.inc[m60]), cmap="PuBuGn", vmin=0.0, vmax=90.0)
    g = sns.regplot(p.Dprime[m60], p.rAU[m60], ax=ax, order=2, ci=99,
                    color="#774499", scatter_kws=color_kws, line_kws=line_kws)
    ax.text(3, 340, 'Simulation #{}, N = {}'.format(i, m60.sum()))
    i += 1
            
for ax in axes[-1, :]:
    ax.set_xlabel("$D'$, arcsec")
for ax in axes[:, 0]:
    ax.set_ylabel('$r$, AU')
for ax in axes.flat:
    ax.tick_params(axis='both', which='major', labelsize=9)
    ax.set_xlim(0.0, 62.0)
    ax.set_ylim(0.0, 370.0)
    
cb = plt.colorbar(g.collections[0], ax=list(axes.flat), shrink=0.25, fraction=0.02)
cb.set_label("Inclination, deg")
    
fig.suptitle("Comparison of Orion proplyds with 24 simulated populations", fontsize='xx-large')
#fig.tight_layout(rect=(0, 0, 1, 0.98), h_pad=0.1, w_pad=0.1)
fig.subplots_adjust(left=0.1, right=0.87, bottom=0.1, top=0.96, wspace=0.05, hspace=0.05)
fig.set_size_inches(16, 16)

These work out great. The majority of the simulated datasets bear at least a passing resemblance to the real one. I had to change the cut-off size to be linear in $D$, otherwise there were too many simulated datasets with large proplyds at small distances.

For the simulated populations we show the absolute value of the inclination. Obviously, we can't know this for the proplyds (actually, maybe we could).

Note that the large proplyds at small radii tend to be at high inclinations, meaning that the true $D$ is much larger than the projected $D'$.

In [103]:
#istart = 1133
#istart = 7777
istart = 999
nsplit = 5
scatter_kws = dict(marker='o', alpha=0.6, edgecolors='none')
line_kws = dict(lw=2)
fig, axes = plt.subplots(5, 5, sharex=False, sharey=True)

# Plot real proplyd dataset
rAU_splits = np.array_split(jjdata['rAU'], nsplit)
Dp_splits = np.array_split(jjdata['Dprime'], nsplit)
labels = ['{:.0f}–{:.0f}'.format(x.min(), x.max()) for x in Dp_splits]
sns.violinplot(rAU_splits, inner="points", names=labels, color="RdPu", ax=axes[0, 0])
axes[0,0].set_xlabel('')

# Plot a bunch of fake datasets
for p, ax in zip(mega_datasets[istart:istart+24], axes.flat[1:]):
    m60 = p.Dprime <= 60.0
    sort_order = np.argsort(p.Dprime[m60])
    rAU_splits = np.array_split(p.rAU[m60][sort_order], nsplit)
    Dp_splits = np.array_split(p.Dprime[m60][sort_order], nsplit)
    labels = ['{:.0f}–{:.0f}'.format(x.min(), x.max()) for x in Dp_splits]
    sns.violinplot(rAU_splits, inner="points", names=labels, color="PuBuGn", ax=ax)

# Sort out the axis labels    
for ax in axes[:, 0]:
    ax.set_ylabel('$r$, AU')
for ax in axes[-1, :]:
    ax.set_xlabel("Ranges of $D'$, arcsec")
for ax in axes.flat:
    ax.tick_params(axis='both', which='major', labelsize=9)
    ax.set_ylim(0.0, 370.0)
    
fig.suptitle("Comparison of Orion proplyds with 24 simulated populations", fontsize='xx-large')
fig.tight_layout(rect=(0, 0, 1, 0.97), h_pad=0.2, w_pad=0.2)
fig.set_size_inches(16, 16)

The distribution of $\beta$

Finally we can get on to this. First we work out the density at the i-front on the axis and thus the equivalent spherical $\dot{M}$. Note that this is larger than the true mass-loss rate, since it doesn't account for the reduction in density away from the axis.

In [201]:
from astropy.constants import c, m_p, Ryd, L_sun, M_sun
from astropy.constants import h as hplanck
In [208]:
QH = 1e49/u.s
d_orion = 430
alpha_B = 2.6e-13*u.cm**3/u.s
alpha_ha = 1.0e-13*u.cm**3/u.s
omega = 0.1
# ionized sound speed
ci = (11.0*u.km/u.s).to(u.cm/u.s)
# mean mass per particle
m = (1.3*u.u).to(u.g)
# H alpha photon energy
E_ha = (hplanck*c/(6563.0*u.angstrom)).to(u.erg)

#iset = 1021
iset = 1999
p = mega_datasets[iset]
D = (p.D*d_orion*u.au).to(u.cm)
F = QH/(4.0*np.pi*D**2)
r = (p.rAU*u.AU).to(u.cm)
# u0 n + alpha_B omega n^2 r = F
ntilde = ci/(2*alpha_B*omega*r)
n = np.sqrt(ntilde**2 + F / (alpha_B*omega*r)) - ntilde
lambda_ = ci*n/F 
L_ha = E_ha*alpha_ha*r**3*n**2
S_ha = E_ha*alpha_ha*r*n**2/(4.0*np.pi*u.steradian)
#lambda_[(p.Dprime < 60.0) & (p.rAU > 50.0)].max()
(L_ha/L_sun).decompose().max()
Mdot = (n*ci*m*4*np.pi*r**2).to(u.solMass/u.year)
Mdot.mean()
Out[208]:
$1.41815\times 10^{-07} \; \mathrm{\frac{M_{\odot}}{yr}}$

We look at the distribution of the advection parameter $\lambda = n_0 u_0 / F$ versus the H$\alpha$ luminosity.

In [203]:
m60 = p.Dprime < 60.0
x = L_ha[m60]
y = lambda_[m60]
g = sns.jointplot(np.log10(x.value), np.log10(y))
g.ax_joint.set_ylabel(r'Advection parameter: $\log_{10} \,\lambda$')
#g.ax_joint.set_xlabel('Proplyd luminosity $\\log_{10}L(\\mathrm{H\\alpha})$ / [{:LATEX}])'.format(x.unit));
g.ax_joint.set_xlabel('Proplyd luminosity: $\\log_{{10}}\\,L$(H$\\alpha$) [{:LATEX}]'.format(x.unit))
Out[203]:
<matplotlib.text.Text at 0x151757b90>

And the surface brightness versus distance:

In [107]:
rootcolor = (0.5, 0.7, 0.9)
ncolors = 16
darkmap = sns.dark_palette(rootcolor, ncolors, reverse=True, as_cmap=True)
sns.palplot(sns.dark_palette(rootcolor, ncolors, reverse=True))

First we use a jointplot to see the marginal distributions.

In [204]:
x = p.Dprime[m60]
y = S_ha[m60]
z = p.rAU[m60]
inc = np.abs(p.inc[m60])
g = sns.jointplot(np.log10(x), np.log10(y.value), 
                  joint_kws={"s": z, "c": inc, "vmin": 0.0, "vmax": 90.0, "cmap": darkmap},
                  size=8)

Then we try with regplot, plotting a linear regression. The results vary a bit between samples, but generally the regression line is a bit steeper than $D'^{-2}$ because the average inclination becomes higher at smaller separations.

This compares pretty well with O'Dell (1998) Fig 2, once we take into account that he is using photon units.

In [205]:
g = sns.regplot(np.log10(x), np.log10(y.value), order=1, 
                robust=False,
                scatter_kws={"s": z, "c": inc, "vmin": 0.0, "vmax": 90.0, "cmap": darkmap}
            )
g.set_ylim(-1.0, 3.0)
xpts = np.linspace(*g.get_xlim())
for y0 in [2.0, 3.0, 4.0, 5.0]:
    g.plot(xpts, y0 - 2*xpts, '-', c='w', zorder=-100, lw=1)
cb = plt.colorbar(g.collections[0], ax=g)
cb.set_label("Inclination, deg")
g.set_xlabel("log10(D' / arcsec)")
g.set_ylabel("H alpha brightness, log10(S / erg/s/cm^2/sr)")
g.set_title("Surface brightness versus projected separation")
plt.gcf().set_size_inches(10, 8)

We can get a better regression by controlling for inclination as a confounding variable.

In [206]:
g = sns.regplot(np.log10(x), np.log10(y.value), order=3, 
                x_partial=1.0/np.cos(np.radians(inc)),
                y_partial=1.0/np.cos(np.radians(inc)),
#            robust=True,
                scatter_kws={"s": z, "c": inc, "vmin": 0.0, "vmax": 90.0, "cmap": darkmap}
            )
g.set_ylim(-1.0, 3.0)
xpts = np.linspace(*g.get_xlim())
for y0 in [2.0, 3.0, 4.0, 5.0]:
    g.plot(xpts, y0 - 2*xpts, '-', c='w', zorder=-100, lw=1)
cb = plt.colorbar(g.collections[0], ax=g)
cb.set_label("Inclination, deg")
g.set_xlabel("log10(D' / arcsec)")
g.set_ylabel("H alpha brightness, log10(S / erg/s/cm^2/sr)")
g.set_title("Controlling for inclination as a confounding variable")
plt.gcf().set_size_inches(10, 8)
In [209]:
#m60 = np.ones_like(p.Dprime).astype(bool)
m60 = p.Dprime < 60
x = p.Dprime[m60]
y = Mdot[m60]
z = p.rAU[m60]
inc = np.abs(p.inc[m60])
g = sns.jointplot(np.log10(x), np.log10(y.value), 
                  joint_kws={"s": z, "c": inc, "vmin": 0.0, "vmax": 90.0, "cmap": darkmap},
                  size=8)

Here is the mass loss rate distribution for a bunch of simulated populations. There is a general tendency for the regression line to fall slightly with projected distance. But for the highest mass-loss rates, there doesn't seem to be much of a trend with distance at all.

In [112]:
#istart = 999
istart = 1999

scatter_kws = dict(marker='o', alpha=1.0, edgecolors=None)
line_kws = dict(lw=2)
fig, axes = plt.subplots(5, 5, sharex=True, sharey=True)
i = istart
for p, ax in zip(mega_datasets[istart:istart+25], axes.flat):
    m60 = p.Dprime <= 60.0
    
    D = (p.D*d_orion*u.au).to(u.cm)
    F = QH/(4.0*np.pi*D**2)
    r = (p.rAU*u.AU).to(u.cm)
    ntilde = ci/(2*alpha_B*omega*r)
    n = np.sqrt(ntilde**2 + F / (alpha_B*omega*r)) - ntilde
    Mdot = (n*ci*m*4*np.pi*r**2).to(u.solMass/u.year)

    color_kws = scatter_kws.copy()
    color_kws.update(c=np.abs(p.inc[m60]), s=0.5*p.rAU[m60], cmap="PuBuGn", vmin=0.0, vmax=90.0)
    g = sns.regplot(p.Dprime[m60], Mdot[m60]/1.e-7, ax=ax, order=2, ci=99,
                    color="#774499", scatter_kws=color_kws, line_kws=line_kws)
    ax.text(0.02, 0.92, 'Simulation #{}, N = {}'.format(i, m60.sum()), transform=ax.transAxes)
    i += 1
            
for ax in axes[-1, :]:
    ax.set_xlabel("$D'$, arcsec")
for ax in axes[:, 0]:
    ax.set_ylabel(r'$\dot{M}$, 1e-7 solar masses / yr')
for ax in axes.flat:
    ax.tick_params(axis='both', which='major', labelsize=9)
    ax.set_xlim(0.0, 62.0)
    ax.set_ylim(-0.3, 20.0)
    
cb = plt.colorbar(g.collections[0], ax=list(axes.flat), shrink=0.25, fraction=0.02)
cb.set_label("Inclination, deg")
    
fig.suptitle("Mass loss rates for 25 simulated populations", fontsize='xx-large')
#fig.tight_layout(rect=(0, 0, 1, 0.98), h_pad=0.1, w_pad=0.1)
fig.subplots_adjust(left=0.1, right=0.87, bottom=0.1, top=0.96, wspace=0.05, hspace=0.05)
fig.set_size_inches(16, 16)

Next, we do the same for the $\beta$ values, which are directly proportional so the graphs look pretty identical.

In [113]:
#istart = 999
istart = 1999

# proplyd flow Mach number at shock
Mach = 3.0
# O-star wind parameters
Mdw = 3.5e-7*u.solMass/u.year
Vw = (1200*u.km/u.s).to(u.cm/u.s)
scatter_kws = dict(marker='o', alpha=1.0, edgecolors=None)
line_kws = dict(lw=2)
fig, axes = plt.subplots(5, 5, sharex=True, sharey=True)
i = istart
for p, ax in zip(mega_datasets[istart:istart+25], axes.flat):
    m60 = p.Dprime <= 60.0
    
    D = (p.D*d_orion*u.au).to(u.cm)
    F = QH/(4.0*np.pi*D**2)
    r = (p.rAU*u.AU).to(u.cm)
    ntilde = ci/(2*alpha_B*omega*r)
    n = np.sqrt(ntilde**2 + F / (alpha_B*omega*r)) - ntilde
    Mdot = (n*ci*m*4*np.pi*r**2).to(u.solMass/u.year)
    beta = Mdot*Mach*ci/(Mdw*Vw)
    color_kws = scatter_kws.copy()
    color_kws.update(c=np.abs(p.inc[m60]), s=0.5*p.rAU[m60], cmap="PuBuGn", vmin=0.0, vmax=90.0)
    g = sns.regplot(p.Dprime[m60], beta[m60], ax=ax, order=2, ci=99,
                    color="#774499", scatter_kws=color_kws, line_kws=line_kws)
    ax.text(0.02, 0.92, 'Simulation #{}, N = {}'.format(i, m60.sum()), transform=ax.transAxes)
    i += 1
            
for ax in axes[-1, :]:
    ax.set_xlabel("$D'$, arcsec")
for ax in axes[:, 0]:
    ax.set_ylabel(r'$\beta$')
for ax in axes.flat:
    ax.tick_params(axis='both', which='major', labelsize=9)
    ax.set_xlim(0.0, 62.0)
    ax.set_ylim(-0.003, 0.15)
    
cb = plt.colorbar(g.collections[0], ax=list(axes.flat), shrink=0.25, fraction=0.02)
cb.set_label("Inclination, deg")
    
fig.suptitle("Wind-wind beta values for 25 simulated populations", fontsize='xx-large')
#fig.tight_layout(rect=(0, 0, 1, 0.98), h_pad=0.1, w_pad=0.1)
fig.subplots_adjust(left=0.1, right=0.87, bottom=0.1, top=0.96, wspace=0.05, hspace=0.05)
fig.set_size_inches(16, 16)

We can sum all the simulation samples to get better statistics

In [114]:
multiplier = 10000
bigp = SourcePopulation(190*multiplier, m=2.1, Dmin=5.0, Dmax=250.0)

Joint distribution of size and projected separation for the large sample. We can clearly see the corners due to the sharp cut-offs.

In [115]:
sns.jointplot(bigp.Dprime, bigp.rAU, stat_func=None, kind='hex', size=10, xlim=(0, 60), ylim=(0, 350))
Out[115]:
<seaborn.axisgrid.JointGrid at 0x139c43c10>
In [116]:
m60 = bigp.Dprime <= 60.0
m30 = bigp.Dprime <= 30.0
mm30 = bigp.D <= 30.0
mm15 = bigp.D <= 15.0
D = (bigp.D*d_orion*u.au).to(u.cm)
F = QH/(4.0*np.pi*D**2)
r = (bigp.rAU*u.AU).to(u.cm)
ntilde = ci/(2*alpha_B*omega*r)
n = np.sqrt(ntilde**2 + F / (alpha_B*omega*r)) - ntilde
Mdot = (n*ci*m*4*np.pi*r**2).to(u.solMass/u.year)
beta = Mdot*Mach*ci/(Mdw*Vw)

Histograms of the log of the $\beta$ values for the different subsamples. The 60 arcsec sample tends to have slightly smaller values than the 30 arcsec sample. Also shown is the sample with true physical separation $< 30''$. This would be relevant if that was the physical extent of the hypersonic wind region. This last sample has a narrow distribution of $\beta$, missing both the high values and the low values. For instance, it drops to zero at about $\beta = 0.1$.

In [117]:
hist_kws = {"range": (-5.0, -0.5), "alpha": 0.6}
nbins = 100
#with sns.color_palette("RdPu_r", 4):
with sns.color_palette("coolwarm", 4):
    ax = sns.distplot(np.log10(beta[m60]), kde=False, bins=nbins, hist_kws=hist_kws, label="D' <= 60")
    sns.distplot(np.log10(beta[m30]), kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="D' <= 30")
    sns.distplot(np.log10(beta[mm30 & ~mm15]), kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="15 < D <= 30")
    sns.distplot(np.log10(beta[mm15]), kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="D <= 15")
    ax.set_xlim(*hist_kws["range"])
    ax.legend()
    ax.set_xlabel('log10(beta)')
    ax.set_ylabel('PDF');

In the following two plots we show the joint distribution of inclination and $\beta$ for three different subsamples of the $D' <= 30''$ sample:

  1. True distance $D <= 15''$. The inner core, which is definitely within the supersonic wind region. This has a rather flat distribution in $\beta$ that peaks at 0.01–0.04 and seems almost independent of inclination. The inclination distribution is roughly uniform in $\sin i$, as expected.
  2. True distance with $15'' < D <= 30''$. The intermediate shell, which may or may not be in the supersonic wind zone. These still have the same $i$ distribution as the inner core sample, but the $\beta$ distribution peaks at smaller values of 0.005–0.02.
  3. Projected distance $D' <= 30''$ but $D > 30''$. The outer interlopers, which should not show any wind-wind bowshocks since they are simply projected onto the inner core.
In [118]:
mask = mm15
g = sns.jointplot(np.abs(bigp.inc[mask]), beta[mask], 
                  stat_func=None, kind='hex', size=6, xlim=(0, 90), ylim=(0, 0.1))
g.set_axis_labels('inclination', 'beta')
nexpect = mask.sum()/multiplier 
g.ax_joint.text(2, 0.095, 
                "Sample with true separation $D < 15''$, $N = {:.1f}$".format(nexpect), 
                fontsize='x-large');
In [119]:
mask = mm30 & ~mm15
g = sns.jointplot(np.abs(bigp.inc[mask]), beta[mask], 
                  stat_func=None, kind='hex', size=6, xlim=(0, 90), ylim=(0, 0.1))
g.set_axis_labels('inclination', 'beta')
nexpect = mask.sum()/multiplier 
g.ax_joint.text(2, 0.095, 
                "Sample with true separation $15'' < D < 30''$, $N = {:.1f}$".format(nexpect), 
                fontsize='x-large');
In [120]:
mask = m30 & ~mm30
g = sns.jointplot(np.abs(bigp.inc[mask]), beta[mask], 
                  stat_func=None, kind='hex', size=6, xlim=(0, 90), ylim=(0, 0.1))
g.set_axis_labels('inclination', 'beta')
nexpect = mask.sum()/multiplier 
g.ax_joint.text(5, 0.09, 
                "Sample with $D > 30''$ but $D' < 30''$, $N = {:.1f}$".format(nexpect), 
                fontsize='x-large');

The next three graphs show the $\beta$ distribution in more detail for the three sub-samples. Separate histograms are shown for low and high inclinations. For the two inner sub-samples there is no difference between the two inclination ranges, which must be true since the selection criteria are independent of $i$. For the outer interlopers, on the other hand, there is a difference: smaller $\beta$ for higher inclinations, due to the implicit dependence of $D$ on $i$ for this sub-sample that is caused by imposing a cut-off in $D'$.

In [121]:
incmask = np.abs(bigp.inc) < 30.0
hist_kws = {"range": (-0.001, 0.101), "alpha": 0.4}
nbins = 100
with sns.color_palette("coolwarm", 2):
    ax = sns.distplot(beta[mm15 & incmask], kde=False, bins=nbins, hist_kws=hist_kws, label="|i| < 30 deg")
    sns.distplot(beta[mm15 & ~incmask], kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="|i| >= 30 deg")
    ax.set_xlim(*hist_kws["range"])
    ax.legend()
    ax.set_title('Inner core: D <= 15\'\'')
    ax.set_xlabel('beta')
    ax.set_ylabel('PDF');
In [122]:
with sns.color_palette("coolwarm", 2):
    ax = sns.distplot(beta[mm30 & ~mm15 & incmask], kde=False, bins=nbins, hist_kws=hist_kws, label="|i| < 30 deg")
    sns.distplot(beta[mm30 & ~mm15 & ~incmask], kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="|i| >= 30 deg")
    ax.set_xlim(*hist_kws["range"])
    ax.legend()
    ax.set_title('Intermediate shell: D = 15\'\'–30\'\'')
    ax.set_xlabel('beta')
    ax.set_ylabel('PDF');
In [123]:
incmask = np.abs(bigp.inc) < 60.0
with sns.color_palette("coolwarm", 2):
    ax = sns.distplot(beta[m30 & ~mm30 & incmask], kde=False, bins=nbins, hist_kws=hist_kws, label="|i| < 60 deg")
    sns.distplot(beta[m30 & ~mm30 & ~incmask], kde=False, bins=nbins, hist_kws=hist_kws, ax=ax, label="|i| >= 60 deg")
    ax.set_xlim(*hist_kws["range"])
    ax.legend()
    ax.set_title('Outer interlopers: D\' < 30\'\' but D > 30\'\'')
    ax.set_xlabel('beta')
    ax.set_ylabel('PDF');
In [124]:
sns.distplot(np.sin(np.radians(bigp.inc[mm30])), kde=False, bins=nbins)
Out[124]:
<matplotlib.axes._subplots.AxesSubplot at 0x1548d0e90>

Finally, bowshock shapes

In [125]:
# Make the conic-projection helper module importable from its sibling
# checkout directory (local repo, not an installed package).
import sys
sys.path.insert(0, "./conic-projection")
import conproj_utils
In [126]:
# Example conic shell: A is the planitude (A = R_c/R_0, per the label in
# the plot cell below); th_conic is the conic angle in degrees.
# NOTE(review): exact Conic semantics live in conproj_utils — confirm there.
conic = conproj_utils.Conic(A=2.0, th_conic=15.0)
In [127]:
# Parameter values from the on-axis point out to pi/2 around the shell.
trange = conic.make_t_array([conic.tparallel(0.0), np.pi/2])
In [128]:
# Trace the conic in the plane of the sky with equal axis scales.
plt.plot(conic.x(trange), conic.y(trange))
plt.axis('equal')
plt.xlim(-3.0, 1.2)
Out[128]:
(-3.0, 1.2)
In [129]:
def A(beta):
    """Bowshock planitude A = R_c/R_0 for wind momentum ratio `beta`."""
    sb = np.sqrt(beta)
    return 1.5/(1.0 - sb)

def thc(beta, xi=1.0):
    """Signed conic angle theta_c (radians) for momentum ratio `beta`.

    `xi` = 1 is the isotropic inner wind; `xi` < 1 an anisotropic
    (proplyd-like) wind.  Negative values correspond to hyperbolic
    shapes (all CRW isotropic models), positive to closed shapes.
    """
    sb = np.sqrt(beta)
    iso_term = 1.0/(1.0 - sb)
    aniso_term = xi*(1.0 + sb)**2/(1.0 - xi*beta)**2/(1 + 0.2*xi*beta)
    arg = 3*(iso_term - aniso_term)
    # Encode the branch in the sign; magnitude from |arg|.
    return np.sign(arg)*np.arctan(np.sqrt(np.abs(arg)))



# Finer grid below beta = 0.01 to resolve the rapid variation there.
beta_grid = np.r_[np.linspace(0.0, 0.01, 100), np.linspace(0.011, 0.1, 100)]
with sns.color_palette("PuRd_d", 4):
    plt.plot(beta_grid, A(beta_grid), label='A = R_c/R_0')
    plt.plot(beta_grid, thc(beta_grid), label='theta_c (isotropic)')
    plt.plot(beta_grid, thc(beta_grid, xi=0.8), label='theta_c (proplyd)')
    plt.plot(beta_grid, thc(beta_grid, xi=0.6), label='theta_c (xi=0.6)')
    # Guide lines at theta_c = +/- 45 degrees
    plt.hlines([np.pi/4, -np.pi/4], -1.0, 1.0, lw=1, alpha=0.3)
    plt.legend(loc='upper left');
    plt.xlim(-0.002, 0.102)
    plt.title('Bowshock shape parameters')
    plt.xlabel('beta')

Note that all the shapes are hyperbolae for the CRW isotropic models, so $\theta_\mathrm{c} < 0$. For the proplyds, on the other hand, $\theta_\mathrm{c} > 0$ for small values of $\beta$.

In [129]:
 
In [221]:
# Joint distribution of planitude A and conic angle theta_c (proplyd
# wind, xi = 0.8) for the inner-core (true D <= 15'') sub-sample.
Dmax = 15
mask = mm15
g = sns.jointplot(A(beta[mask]), np.degrees(thc(beta[mask], xi=0.8)), 
                  stat_func=None, kind='hex', size=6, 
                  xlim=(1.5, 3.0), ylim=(-90, 90))
g.set_axis_labels('A', 'th_c, degrees')
# NOTE(review): `multiplier` is defined in an earlier cell — presumably
# the Monte Carlo oversampling factor; confirm before re-running.
nexpect = mask.sum()/multiplier 
g.ax_joint.text(1.55, 0.0, 
                "Sample with true separation $D < {}''$, $N = {:.1f}$".format(Dmax, nexpect), 
                fontsize='x-large');
In [222]:
# Projected shape parameters for every Monte Carlo proplyd in `mask`:
# (Aprime, qprime) for isotropic-wind conics and (Aprime_p, qprime_p)
# for the proplyd-wind (xi = 0.8) conics.
Aprime, qprime = [], []
Aprime_p, qprime_p = [], []
for inclination, b in zip(bigp.inc[mask], beta.value[mask]):
    rootb = np.sqrt(b)
    q = rootb/(1.0 + rootb)  # presumably R_0/D from CRW theory — confirm
    # Isotropic-wind shape for this beta
    shape_iso = conproj_utils.Conic(A=A(b), th_conic=np.degrees(thc(b)))
    Aprime.append(shape_iso.Aprime(inclination))
    qprime.append(q*shape_iso.g(inclination))
    # Anisotropic proplyd-wind shape (xi = 0.8)
    shape_prop = conproj_utils.Conic(A=A(b), th_conic=np.degrees(thc(b, xi=0.8)))
    Aprime_p.append(shape_prop.Aprime(inclination))
    qprime_p.append(q*shape_prop.g(inclination))
In [223]:
# Convert the accumulator lists to numpy arrays so the masking and
# statistics below can be vectorized.
Aprime, qprime, Aprime_p, qprime_p = (
    np.array(values) for values in (Aprime, qprime, Aprime_p, qprime_p))
In [224]:
# Keep only arcs with a finite projected planitude (non-finite values
# presumably mark failed projections — confirm in conproj_utils).
goodmask = np.isfinite(Aprime)
goodmask_p = np.isfinite(Aprime_p)
# First four fields of scipy.stats.describe: (nobs, (min, max), mean, variance)
print(stats.describe(Aprime[goodmask])[:4])
print(stats.describe(Aprime_p[goodmask_p])[:4])
print(stats.describe(qprime[goodmask])[:4])
print(stats.describe(qprime_p[goodmask_p])[:4])
(73047, (1.5075612040900508, 285.55926321729822), 2.3052402008348074, 3.984445796837325)
(95806, (1.0010229164138955, 38.719948564599818), 1.8511570729351388, 0.19150461773847796)
(73047, (0.0044560050199983631, 0.51232049429739879), 0.1968657869012751, 0.0064566668561296644)
(95806, (0.0044548969596591463, 352.99730987866036), 0.39060483814703878, 2.8095088149590044)
In [225]:
# Flag arcs with q' > 1 and report, for each wind model, the fraction
# of the sample that yields an arc at all and the q' > 1 fraction.
badmask = qprime > 1.0
badmask_p = qprime_p > 1.0
s = "Arc fraction = {:.3f}, q' > 1 fraction = {:.3f}"
print("Isotropic:", s.format(goodmask.sum()/len(goodmask), 
                            badmask.sum()/len(goodmask)))
print("Proplyd:", s.format(goodmask_p.sum()/len(goodmask_p), 
                          badmask_p.sum()/len(goodmask_p)))
Isotropic: Arc fraction = 0.748, q' > 1 fraction = 0.000
Proplyd: Arc fraction = 0.981, q' > 1 fraction = 0.052
In [226]:
# Joint distribution of beta vs |i| for the arcs that survived the
# finite-planitude cut in the proplyd-wind model.
sns.jointplot(beta.value[mask][goodmask_p], 
              np.abs(bigp.inc[mask][goodmask_p]), 
              kind='hex')
Out[226]:
<seaborn.axisgrid.JointGrid at 0x15a982b90>
In [227]:
# Final selections: finite planitude AND q' <= 1, per wind model.
good = goodmask & ~badmask
good_p = goodmask_p & ~badmask_p
In [228]:
# Compare projected planitude A' distributions: proplyd vs isotropic wind.
hist_kws = {"range": (1.0, 3.5), "alpha":0.6}
with sns.color_palette("coolwarm_r", 2):
    ax = sns.distplot(
        Aprime_p[good_p], label="proplyd",
        kde=False, bins=50, hist_kws=hist_kws)
    sns.distplot(
        Aprime[good], label="isotropic",
        ax=ax,
        kde=False, bins=50, hist_kws=hist_kws)    
ax.set_xlabel("A'")
ax.legend();
In [229]:
# Same comparison for the projected size ratio q'.
hist_kws = {"range": (0.0, 1.0), "alpha": 0.6}
with sns.color_palette("coolwarm_r", 2):
    ax = sns.distplot(qprime_p[good_p], 
                      label="proplyd",
                      kde=False, bins=50, hist_kws=hist_kws)
    sns.distplot(qprime[good], 
                 label="isotropic",
                 ax=ax, kde=False, bins=50, hist_kws=hist_kws)
ax.set_xlabel("q'")
ax.legend();
In [139]:
# `reduce` is needed by dmerge3 below.
# (Removed the stray `reduce?` help query left over from interactive
# exploration — introspection calls should not survive in a saved
# notebook; they clutter Restart-and-Run-All.)
from functools import reduce

Different ways of merging multiple dicts. They all work.

In [140]:
def dmerge(*dicts):
    """Merge any number of dicts into one new dict.

    Builds the result by repeated `update`, so for repeated keys the
    value from the later argument wins.
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged

def dmerge2(*dicts):
    "See http://dietbuddha.blogspot.mx/2013/04/python-expression-idiom-merging.html"
    return {key: val for mapping in dicts for key, val in mapping.items()}

def dmerge3(*dicts):
    "See Power Baker's Feb 2014 comment on above page"
    # `update` returns None, so `or` falls through to the accumulator.
    return reduce(lambda acc, nxt: acc.update(nxt) or acc, dicts, {})

dmerge({"a": 1}, {"b": 2}, dict(c=3, d=4))
Out[140]:
{'b': 2, 'c': 3, 'a': 1, 'd': 4}
In [242]:
# Publication figure: scatter of (q', A') for a sub-sample of nsample
# simulated arcs, isotropic (blue) vs proplyd (red) winds, with marginal
# histograms for the proplyd model.  Saved as a PDF at the end.
# Marker size tracks 2*rAU; marker color tracks |sin i|.
cmap_i = sns.blend_palette(["MidnightBlue", "PowderBlue"], 
                           256, as_cmap=True)
cmap_p = sns.blend_palette(["IndianRed", "white"], 
                           256, as_cmap=True)
nsample = 100
istart = 271
# Keyword arguments shared by both scatter layers
scatter_kws={
    "alpha": 0.7, 
    "vmin": 0.0, "vmax": 0.9,
    }
iso_kws={
    "s": 2*bigp.rAU[mask][good][istart:istart+nsample], 
    "c": np.abs(np.sin(np.radians(bigp.inc[mask][good][istart:istart+nsample]))),
    "cmap": cmap_i
}
prop_kws={
    "s": 2*bigp.rAU[mask][good_p][istart:istart+nsample], 
    "c": np.abs(np.sin(np.radians(bigp.inc[mask][good_p][istart:istart+nsample]))),
    "cmap": cmap_p
}
# Isotropic layer via jointplot; proplyd layer overplotted on the same axes
g = sns.jointplot(
    qprime[good][istart:istart+nsample], 
    Aprime[good][istart:istart+nsample], 
    stat_func=None, kind='scatter', size=8, 
    xlim=(0.0, 0.5), ylim=(0.0, 3.5), 
    joint_kws=dmerge(scatter_kws, iso_kws))
g.ax_joint.scatter(
    qprime_p[good_p][istart:istart+nsample], 
    Aprime_p[good_p][istart:istart+nsample], 
    **dmerge(scatter_kws, prop_kws)
)
sns.distplot(qprime_p[good_p][istart:istart+nsample],
             kde=False, ax=g.ax_marg_x, color="IndianRed")
sns.distplot(Aprime_p[good_p][istart:istart+nsample],
             kde=False, ax=g.ax_marg_y, vertical=True, color="IndianRed")
g.set_axis_labels('q\'', 'A\'')
# stats.describe(...)[1] is the (min, max) pair of the rAU values
rmin, rmax = stats.describe(bigp.rAU[mask][good_p][istart:istart+nsample])[1]
g.ax_joint.text(
    0.01, 0.1, 
    "Sample with true separation $D < {}''$, $N = {:d}$, $r = {:.0f}$–${:.0f}$ AU".format(Dmax, nsample, rmin, rmax), 
    fontsize='x-large')
g.fig.savefig('monte-carlo-Aprime-qprime.pdf');
In [182]:
import matplotlib
In [184]:
matplotlib.colors.SymLogNorm?
In [1919]:
# Replot the example conic from above, this time in a fixed square window.
plt.plot(conic.x(trange), conic.y(trange))
plt.axis([-3, 3, -3, 3])
Out[1919]:
[-3, 3, -3, 3]

Scratch area

In [ ]:
 
In [ ]:
 
In [1828]:
sns.palplot(sns.color_palette("colorblind"))
In [1851]:
sns.jointplot?
In [1304]:
repr(jjdata[1]['Dprime_HA'])
Out[1304]:
'nan'
In [1480]:
a = dict(a=1, b=2)
b = {'x': 2}
c = a.copy()
c.update(b)
c
Out[1480]:
{'x': 2, 'a': 1, 'b': 2}
In [1006]:
import astropy
astropy.__version__
from astropy import conf
conf.max_lines = -1
In [1295]:
astropy.io.ascii.write?
In [1045]:
(3e14*u.cm).to(u.au)
Out[1045]:
$20.0538 \; \mathrm{AU}$
In [1052]:
q = (1e14*u.cm)/(1.0*u.au)
"{0.value:0.03e} {0.unit:latex}".format(q)
Out[1052]:
'1.000e+14 $\\mathrm{\\frac{cm}{AU}}$'
In [1060]:
plt.hist?
In [1018]:
jjdata
Out[1018]:
IDChordr14RADecDprimePADprime_HAr14_HAVicR14
163-3230.1011089261723.252021213775:35:16.3125-5:23:22.62.27242083601276.2794710622.142.219.4
168-326W0.1005342149013.233536463555:35:16.8225-5:23:25.9856.20797063143120.346612506nannannan
161-3240.1011089244443.252021158195:35:16.0435-5:23:24.316.44385851073256.891366896.053.525.8
168-326E0.2435727716847.834163120965:35:16.8425-5:23:26.366.6574227895121.83327501nannannan
168-3280.1269980216774.08470622965:35:16.76-5:23:28.1556.90872052245140.1817864466.642.821.0
163-3170.1816984797395.844066720865:35:16.27-5:23:16.3257.13645181477336.0806500776.915.027.1
166-3160.1158783807463.727059189255:35:16.6065-5:23:15.9557.2156303579617.18340245047.012.514.8
161-3280.1995687367046.418837484875:35:16.0625-5:23:27.8057.77642727101230.4040876157.749.120.0
167-3170.2495569195588.026634513635:35:16.7455-5:23:16.2457.8300719740232.50361387187.837.935.8
158-3230.170654665875.488858547425:35:15.8045-5:23:22.4559.8529615849272.2890310729.426.330.6
158-3260.2323259753567.472426312635:35:15.8285-5:23:25.549.86108732118254.1608255389.611.331.6
161-3140.1860494671315.984009887465:35:16.095-5:23:14.05510.3755605918327.94349148110.245.317.1
158-3270.4002366670412.87302893055:35:15.7755-5:23:26.58510.9362570123250.0220232810.616.635.5
157-3230.1000000000023.21635422015:35:15.705-5:23:22.4511.3380074485272.01433163410.972.518.4
171-3340.2094863360216.737822609055:35:17.0625-5:23:34.11514.3834063732141.56334754714.294.722.9
170-3370.41624042726513.38776654795:35:16.98-5:23:37.20516.2954814572151.76426977816.212.236.8
169-3380.09449034060543.03914405765:35:16.883-5:23:38.09516.4818823889157.67488234516.472.816.1
176-3250.2699999999978.684156394045:35:17.569-5:23:24.78516.618585154496.691687202416.386.923.5
154-3240.09999999999593.216354219925:35:15.342-5:23:24.0616.795668529265.86353272216.633.210.3
153-3210.08000000000592.573083376235:35:15.342-5:23:21.3116.8224369079275.24729804916.971.28.4
159-3380.1915755186916.161747280015:35:15.886-5:23:38.0817.5053947123209.52961366117.25.013.5
171-3400.5462728212717.57006893995:35:17.0555-5:23:39.80519.120982071152.47369841119.1123.323.2
152-3190.4021371270612.93415445665:35:15.197-5:23:18.6619.3754821445282.48440358319.1618.214.8
155-3380.40262236469412.94976141775:35:15.505-5:23:37.45520.4534702584224.42783197920.4817.029.7
173-3410.1694140229225.448955075615:35:17.327-5:23:41.4222.6072175472145.23369658122.484.118.1
174-3050.07789927861632.505516735165:35:17.368-5:23:04.71522.609238452636.6746518612nannannan
170-3010.1669890714935.370960047995:35:16.9735-5:23:00.68523.434419846318.9559294221nannannan
149-3290.09482644458043.049954351995:35:14.914-5:23:28.99523.9457901205255.126441327nannannan
180-3310.32619216590510.49149549365:35:18.057-5:23:30.74525.0691007756108.36035226325.1212.229.3
177-341W0.60729734091719.53283365285:35:17.684-5:23:41.09525.7875745289135.03756966625.8420.451.6
147-3230.3103594930349.982260651545:35:14.7055-5:23:22.77526.2573193506270.160158314nannannan
177-341E0.04233254936171.361564737855:35:17.736-5:23:41.06526.3212791211133.79527876nannannan
182-3160.2283166074847.34347083995:35:18.187-5:23:31.45527.1353607731108.492030854nannannan
182-3160.2245153617237.221209311455:35:18.2485-5:23:15.4827.652686160974.5461527337nannannan
160-3500.1836119728425.905611437015:35:15.958-5:23:49.727.8934378515195.710045272nannannan
159-3500.62592227142620.13187739125:35:15.9375-5:23:50.12528.3860230251196.0726060128.3520.144.5
148-3050.1418755465564.563220128895:35:14.79-5:23:04.56530.968687221306.184249269nannannan
178-2580.65061247390520.92600176065:35:17.8295-5:22:57.9332.20124304239.3004420701nannannan
166-2500.1493390719224.803273541945:35:16.586-5:22:50.0732.82935280923.18779803885nannannan
163-2490.2391518179047.691969587495:35:16.332-5:22:48.79534.1103502976356.693133312nannannan
175-2510.1982186032466.375412410435:35:17.4775-5:22:51.0335.236510725225.4450047686nannannan
170-2490.493731641915.88015855:35:16.9675-5:22:48.13535.519345688512.2276895598nannannan
175-3550.2282288546517.340648397945:35:17.5345-5:23:54.9735.8813209789153.53608556nannannan
176-2520.1194711806343.842616360075:35:17.641-5:22:51.4835.959262435329.2690027106nannannan
189-3290.1407942246124.52844098495:35:18.8795-5:23:28.8236.566800050299.3991034189nannannan
170-4000.294797683469.481737732565:35:16.9535-5:23:59.5537.4230608759168.730166937nannannan
179-3540.1201179044573.863417289045:35:17.965-5:23:53.5337.9994898857143.844650843nannannan
139-3200.1507415519184.848382266495:35:13.9075-5:23:20.04538.2771290438274.199809704nannannan
174-4000.2947975908469.481734753795:35:17.3775-5:24:00.2939.8504791104159.975997997nannannan
142-3010.96797786883331.13359703345:35:14.1165-5:23:00.7141.459036843302.274726344nannannan
168-4040.3001709084369.654559680845:35:16.758-5:24:04.30541.6886704111173.949648739nannannan
166-4060.1836106584325.905569160955:35:16.569-5:24:06.143.2799924768177.918900161nannannan
181-2470.3030828309819.748217424515:35:18.084-5:22:46.943.333196423733.9442221803nannannan
181-4010.1358807278894.37040552575:35:18.0645-5:24:01.2345.2169839779148.084746004nannannan
154-2400.60567146710619.48053979195:35:15.374-5:22:39.6646.1530184606339.352492803nannannan
191-3500.51536760650616.57604776065:35:19.075-5:23:49.6847.3345320128124.531172nannannan
165-235S0.43870683708414.11036586825:35:16.4725-5:22:35.25547.5937335990.157230005023nannannan
165-235N0.1344060910844.322975982585:35:16.4835-5:22:34.9747.87946244090.352875196377nannannan
173-2360.52174359641116.7811221815:35:17.349-5:22:35.549.15950733815.6003588897nannannan
190-2510.1379422113664.436710136495:35:19.0345-5:22:50.46550.225115768749.8522092634nannannan
167-2310.32854879080510.5672928985:35:16.728-5:22:31.1851.81903175474.36755966222nannannan
174-4140.2695430296438.669458608765:35:17.385-5:24:13.9752.9402401377164.938002454nannannan
159-4180.31423655848810.10696080995:35:15.8965-5:24:17.8755.6697303594188.752413361nannannan
133-3530.1995681037266.418817126075:35:13.2955-5:23:53.05556.1336398771237.443857624nannannan
150-2310.1982203563046.375468794975:35:15.0175-5:22:30.8356.3241570027337.45143637nannannan
182-4130.99894955477932.12975616135:35:18.2155-5:24:13.15556.7015784227152.52558854456.736.948.7
204-3300.113.537989642045:35:20.42-5:23:29.65559.472001930796.5725559439nannannan
154-2250.1803214866475.799777745435:35:15.362-5:22:25.1559.9986653263344.083616824nannannan
205-3300.3103594907529.982260578115:35:20.4725-5:23:29.72560.258881296296.5533700094nannannan
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [ ]:
 
In [934]:
data.pformat?
In [935]:
from astropy.io import ascii
In [936]:
ascii.write(data, Writer=ascii.Latex)
\begin{table}
\begin{tabular}{cccc}
ID & d & r14 & VicR14 \\
163-323 & 2.14 & 2.2 & 19.4 \\
161-324 & 6.05 & 3.5 & 25.8 \\
168-328 & 6.64 & 2.8 & 21.0 \\
163-317 & 6.91 & 5.0 & 27.1 \\
166-316 & 7.01 & 2.5 & 14.8 \\
161-328 & 7.74 & 9.1 & 20.0 \\
167-317 & 7.83 & 7.9 & 35.8 \\
158-323 & 9.42 & 6.3 & 30.6 \\
158-326 & 9.6 & 11.3 & 31.6 \\
161-314 & 10.24 & 5.3 & 17.1 \\
158-327 & 10.6 & 16.6 & 35.5 \\
157-323 & 10.97 & 2.5 & 18.4 \\
171-334 & 14.29 & 4.7 & 22.9 \\
170-337 & 16.2 & 12.2 & 36.8 \\
176-325 & 16.38 & 6.9 & 23.5 \\
169-338 & 16.47 & 2.8 & 16.1 \\
154-324 & 16.63 & 3.2 & 10.3 \\
153-321 & 16.97 & 1.2 & 8.4 \\
159-338 & 17.2 & 5.0 & 13.5 \\
171-340 & 19.11 & 23.3 & 23.2 \\
152-319 & 19.16 & 18.2 & 14.8 \\
155-338 & 20.48 & 17.0 & 29.7 \\
173-341 & 22.48 & 4.1 & 18.1 \\
180-331 & 25.12 & 12.2 & 29.3 \\
177-341W & 25.84 & 20.4 & 51.6 \\
159-350 & 28.35 & 20.1 & 44.5 \\
182-413 & 56.7 & 36.9 & 48.7 \\
244-440 & 142.3 & 104.0 & 180.6 \\
\end{tabular}
\end{table}
In [937]:
ascii.Latex?
In [938]:
ascii.latexdicts
Out[938]:
{'template': {'caption': 'caption',
  'col_align': 'col_align',
  'preamble': 'preamble',
  'tabletype': 'tabletype',
  'data_end': 'data_end',
  'tablealign': 'tablealign',
  'header_start': 'header_start',
  'units': {'col1': 'unit of col1', 'col2': 'unit of col2'},
  'header_end': 'header_end',
  'data_start': 'data_start',
  'tablefoot': 'tablefoot'},
 'doublelines': {'header_start': '\\hline \\hline',
  'header_end': '\\hline\\hline',
  'tabletype': 'table',
  'data_end': '\\hline\\hline'},
 'AA': {'header_start': '\\hline \\hline',
  'header_end': '\\hline',
  'tabletype': 'table',
  'data_end': '\\hline'}}
In [743]:
data[data['ID'] == '177-341']['ID']
Out[743]:
<Column name='ID' unit=None format=None description=None>
array(['177-341'], 
      dtype='<U7')
In [897]:
sns.jointplot?
In [ ]:
 
In [ ]:
 
In [729]:
sizedata[::10]
Out[729]:
IDChordr14RADecDprimePA
154-2400.60567146710619.48053979195:35:15.374-5:22:39.6646.1530184606339.352492803
174-3050.07789927861632.505516735165:35:17.368-5:23:04.71522.609238452636.6746518612
149-3290.09482644458043.049954351995:35:14.914-5:23:28.99523.9457901205255.126441327
177-341E0.04233254936171.361564737855:35:17.736-5:23:41.06526.3212791211133.79527876
190-2510.1379422113664.436710136495:35:19.0345-5:22:50.46550.225115768749.8522092634
158-3260.1726037583475.551548265575:35:15.824-5:23:25.5659.93256234868254.127891815
174-4140.2695430296438.669458608765:35:17.385-5:24:13.9752.9402401377164.938002454
In [ ]:
 

In [719]:
coord.SkyCoord.to_string?
In [720]:
coords0[:4].to_string('hmsdms')
Out[720]:
['05h35m15.374s -05d22m39.66s',
 '05h35m16.332s -05d22m48.795s',
 '05h35m16.4725s -05d22m35.255s',
 '05h35m16.4835s -05d22m34.97s']
In [721]:
coord.Angle.to_string?
In [723]:
coords0[:4].ra.to_string(unit=u.hourangle)
Out[723]:
array(['5h35m15.374s', '5h35m16.332s', '5h35m16.4725s', '5h35m16.4835s'], dtype=object)
In [1484]:
plt.scatter?
In [1618]:
sns.jointplot?
In [79]:
import matplotlib
In [583]:
matplotlib.markers?
In [1515]:
plt.colorbar?
In [93]:
f.axes[0] == fig._ax1
Out[93]:
True
In [242]:
r = np.random.RandomState(5)
In [243]:
r.random_sample(10)
Out[243]:
array([ 0.22199317,  0.87073231,  0.20671916,  0.91861091,  0.48841119,
        0.61174386,  0.76590786,  0.51841799,  0.2968005 ,  0.18772123])
In [182]:
np.r_[[1, 2, 3], 0, 0, [1, 2]]
Out[182]:
array([1, 2, 3, 0, 0, 1, 2])
In [244]:
import seaborn
In [249]:
seaborn.palettes.dark_palette('red', as_cmap=True)
Out[249]:
<matplotlib.colors.LinearSegmentedColormap at 0x111cb7490>
In [1519]:
sns.palplot(sns.color_palette("RdPu", 5))
In [1520]:
sns.color_palette("RdPu", 5)
Out[1520]:
[(0.99091118854634908, 0.84479816380669093, 0.83054210929309624),
 (0.98300653696060181, 0.67320263385772705, 0.72418302297592163),
 (0.96702806809369257, 0.40464437300083683, 0.63075742300818949),
 (0.80522876977920532, 0.13725490386908254, 0.55947714050610864),
 (0.54320646594552435, 0.0039215688593685627, 0.47538639131714316)]
In [1525]:
sns.__version__
Out[1525]:
'0.3.1'
In [1485]:
matplotlib.collections.Collection?